Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-30 16:44:35 +08:00)

Compare commits: fix-pipeli...v4.31.0 (2401 commits)
| SHA1 | Author | Date |
|---|---|---|
| e42587f596 | |||
| 44006546f0 | |||
| 49eb357564 | |||
| f42a35e611 | |||
| c21c3737c1 | |||
| 054e802914 | |||
| c965d30279 | |||
| e4a52b6a15 | |||
| 4f08887053 | |||
| 38dfb86958 | |||
| 18d42bfd23 | |||
| 9771ad33be | |||
| 8ba26c18cf | |||
| 5bb4430edc | |||
| 1023705440 | |||
| a865b62e07 | |||
| 50726f9ea7 | |||
| 91d7df58b6 | |||
| f32303d519 | |||
| 9d7a0871e2 | |||
| 0866705022 | |||
| c0ca73dc98 | |||
| f9a711df4a | |||
| eebce4470c | |||
| 34d9409427 | |||
| 9342c8fb82 | |||
| 717dadc6f3 | |||
| e367a9770f | |||
| e538189931 | |||
| 6ba4d5de3a | |||
| 21946a8cf4 | |||
| 1f6f32c243 | |||
| 906afa1d5c | |||
| f1732e1374 | |||
| cfc8a05305 | |||
| 395e566a42 | |||
| 0284285501 | |||
| 4f85aaa6c9 | |||
| fc9e387dc0 | |||
| 430a04a75a | |||
| 6e2f069650 | |||
| 7edc33ac7a | |||
| 45025d92f8 | |||
| 6aadb8d016 | |||
| 4c0e251dc7 | |||
| 253d43d46d | |||
| 1be0145d6a | |||
| bb13a92859 | |||
| aac4c79968 | |||
| 33aafc26ee | |||
| 3d8697261e | |||
| 2642d8d04b | |||
| 5739726fcc | |||
| 2489e380e4 | |||
| 8a5e8a9c2a | |||
| b15343de6f | |||
| b3ab3fac1d | |||
| 35eac0df75 | |||
| a074a5d34d | |||
| 2541108564 | |||
| 30ed3adf47 | |||
| abaca9f943 | |||
| f614b6e393 | |||
| 4957294270 | |||
| fb78769b9c | |||
| fded6f4186 | |||
| bbf3090848 | |||
| 66a378429d | |||
| 392740452e | |||
| fb3b22c3b9 | |||
| 9a5d468ba0 | |||
| 3df3b9d4bf | |||
| 050ef14516 | |||
| bd9dfc23b9 | |||
| ee339bad01 | |||
| d211a84aca | |||
| 469f4d0c29 | |||
| b19c7b5ccf | |||
| ea9caf7aba | |||
| f3e96235a3 | |||
| a3b402ff9a | |||
| 4e94566018 | |||
| cd4584e3c8 | |||
| f4e4b4d0e2 | |||
| 9934bb1f42 | |||
| 4b26a61631 | |||
| 6eedfa6dd1 | |||
| fc7ce2ebc5 | |||
| e16191a8ac | |||
| 799df10aef | |||
| 66ded238cd | |||
| d51aa48a76 | |||
| 299aafe55f | |||
| 49e812d12b | |||
| 134caef31a | |||
| 3441ad7d43 | |||
| 78a2b19fc8 | |||
| fd8dcd0953 | |||
| b52a03cd3b | |||
| 9e28750287 | |||
| 232c898f9f | |||
| c817bc44e2 | |||
| 8c4471d1fc | |||
| b324557aac | |||
| 77db28dc52 | |||
| 1c1c90756d | |||
| 2dc5e1a120 | |||
| 4f1b31c2ee | |||
| 1fd52e6e60 | |||
| 63cc30e71b | |||
| ae454f41d4 | |||
| 10c2ac7bc6 | |||
| 66954ea25e | |||
| fd6735102a | |||
| faae8d8255 | |||
| 33b5ef5cdf | |||
| c70c88a268 | |||
| 903b97d8df | |||
| b0651655be | |||
| 6c57ce1558 | |||
| c5e29d4381 | |||
| daccde143d | |||
| 11cb6e0f7e | |||
| e84bf1f734 | |||
| 12240925cf | |||
| 89b6ee49fd | |||
| 04f46a22d8 | |||
| 462f77cbce | |||
| 8e5d1619b3 | |||
| 53194991e9 | |||
| fb6a62762f | |||
| 38db04ece0 | |||
| 7d150d68ff | |||
| 4e8929dcbb | |||
| 06910f5a76 | |||
| bcf02ec701 | |||
| 6fe8d198e3 | |||
| 0863436b6c | |||
| 4abd3ee479 | |||
| 239ace152b | |||
| ac19871ce2 | |||
| 5f3efdf762 | |||
| 43479ef98f | |||
| 68c92981ff | |||
| 7b4e3b5b40 | |||
| c9fd49853f | |||
| 850cf4af0c | |||
| 9895670e95 | |||
| 5757923888 | |||
| c2aa5e17e4 | |||
| 3ca022238b | |||
| 195a9e5bdb | |||
| c8aff1d3e6 | |||
| 914289ac4b | |||
| 892399c5ff | |||
| be2d9f2e47 | |||
| 3b84d86b57 | |||
| 868363abb9 | |||
| 8e164c5400 | |||
| 1e9da2b0a6 | |||
| 8767958fc1 | |||
| a6f37f8879 | |||
| 2898fd3968 | |||
| 5e9f6752ee | |||
| a28325e25e | |||
| c036c814f4 | |||
| 468aed39af | |||
| ea91c2adca | |||
| feb83521ec | |||
| 2c977e4a90 | |||
| 2834c17ad2 | |||
| b6295b26c5 | |||
| a1c4b63076 | |||
| 754f61ca05 | |||
| 8f2ef52fb6 | |||
| 3ce3385c47 | |||
| ebb62e8880 | |||
| 652ece0710 | |||
| 22fe73c378 | |||
| 7e03e46934 | |||
| 6ce6d62b6f | |||
| 127e81c272 | |||
| cd927a4736 | |||
| 0f968ddaa3 | |||
| 1a6fb930fb | |||
| 285a48011d | |||
| 1815d1865e | |||
| 4c6e429589 | |||
| 16c7b16a0a | |||
| 5f0801d174 | |||
| 45f71d793d | |||
| ad78d9597b | |||
| cb8f675510 | |||
| eb849f6604 | |||
| b0513b013b | |||
| e5c760d636 | |||
| c2882403c4 | |||
| 83dc5762e7 | |||
| 297d769d0e | |||
| 6950f70b38 | |||
| 7feba74400 | |||
| 0527c1c0ea | |||
| dc4449918d | |||
| a6b4d1ad83 | |||
| 6c1344444a | |||
| f924df3c7e | |||
| c23d131eab | |||
| 56efbf4301 | |||
| 183f442ba8 | |||
| 0875b2509a | |||
| cfc838dd4d | |||
| c5454eba9e | |||
| 20273ee214 | |||
| c003c8cb52 | |||
| 52c4276e44 | |||
| 7e71eb2ef7 | |||
| a4de24f691 | |||
| 7761b1893a | |||
| 5fca839fef | |||
| 3b5a56e595 | |||
| 0b259a3b7e | |||
| 691b60db90 | |||
| 17e3e7d686 | |||
| 3c124df579 | |||
| 881c0df952 | |||
| ee88ae5994 | |||
| 9138995025 | |||
| bdfd57d1d1 | |||
| 096f2cf126 | |||
| 61ffdeba38 | |||
| 3403712958 | |||
| 896a58de15 | |||
| 62d71f4083 | |||
| ba3fb4b8d7 | |||
| 0b7b4429c7 | |||
| 6134b9b4c7 | |||
| e45bc14350 | |||
| 1a113fcf65 | |||
| c3ca346b49 | |||
| 4124a09f8b | |||
| 01b55779d3 | |||
| 6a081c512a | |||
| 604a21b1e6 | |||
| e6122c3f40 | |||
| 6cd34d451c | |||
| 372f50030b | |||
| a611ac9b3f | |||
| 33196b459c | |||
| 7504be35ab | |||
| 6793f0cfe0 | |||
| 1609a436ec | |||
| 0c3fdccf2f | |||
| 26a2ec56d7 | |||
| 860d11ff7c | |||
| a04ebc8b33 | |||
| 8978b696d7 | |||
| c4fec38bc7 | |||
| 4626df5077 | |||
| eac8dede83 | |||
| 91b62f5a78 | |||
| 6ab045d6fe | |||
| b89fcccd44 | |||
| e0603d894d | |||
| 233113149b | |||
| 3bd1fe4315 | |||
| b979a2064d | |||
| e64d99fa6b | |||
| cf561d7cf1 | |||
| b1ea6b4bf5 | |||
| 7bb6933b9d | |||
| 4ed075280c | |||
| 695928e1e5 | |||
| 3723329d01 | |||
| 3e142cb0f5 | |||
| f91810da88 | |||
| fdd78d9153 | |||
| 74b846cacf | |||
| d7389cd201 | |||
| 70c7994095 | |||
| 41a8fa4e14 | |||
| 4da84008dc | |||
| 0675600a60 | |||
| e5dd7432e7 | |||
| 4fe9716a79 | |||
| f7d80cb3d2 | |||
| 08ae37c820 | |||
| ebd94b0f6f | |||
| dc42a9d76f | |||
| 60b69f7de2 | |||
| 97527898da | |||
| dadc9fb427 | |||
| a9cdb059a8 | |||
| 9f81f4f6dd | |||
| 535f92aea3 | |||
| ba64ec07bb | |||
| 93f73a3848 | |||
| e26c6f03be | |||
| 8f093fb799 | |||
| 0d217f428f | |||
| deff5979fe | |||
| 061580c82c | |||
| 12bb853ccd | |||
| d0d1632958 | |||
| a7501f6fc6 | |||
| 5af3a1aa48 | |||
| 62fe753325 | |||
| 847b47c0ee | |||
| b8fe259f16 | |||
| 707023d155 | |||
| f2b918356c | |||
| be10092e63 | |||
| 03585f3734 | |||
| a6d05d55f6 | |||
| e2972dffdd | |||
| 535542d38d | |||
| 2e2088f24b | |||
| 9322c24476 | |||
| a73883ae9e | |||
| 8b169142f8 | |||
| 71a114d3e0 | |||
| 8c5f306719 | |||
| 2200bf7a45 | |||
| 0f23605094 | |||
| 5fa0a1b23b | |||
| ba695c1efd | |||
| c3572e6bfb | |||
| 5eb3d3c702 | |||
| d1c039e398 | |||
| 2c887cf8e0 | |||
| 12298cb65c | |||
| ef010071ee | |||
| 89b00eef94 | |||
| 5c9394b54c | |||
| 1fc832b454 | |||
| 092c14c37d | |||
| 4795219228 | |||
| a1160185ff | |||
| 6b548129b1 | |||
| 6daf7c311b | |||
| 1e4a7737ed | |||
| 52972e70c7 | |||
| 612b2a1a6d | |||
| f1660d7e23 | |||
| 60825f2c6e | |||
| 02d255db26 | |||
| bc9ecef942 | |||
| 4a55e47877 | |||
| cbf6bc2350 | |||
| 7203ea6797 | |||
| 072188d638 | |||
| ff4c0fc7d2 | |||
| a717e0318c | |||
| b8935980a2 | |||
| 02fe3af275 | |||
| d924390d5b | |||
| c2e3fa0b2a | |||
| 6307312dfc | |||
| 7da3ce04a6 | |||
| c938597657 | |||
| 7631db0fdc | |||
| 17846646f2 | |||
| 649ffbf575 | |||
| 2872f9671b | |||
| 44bd590a29 | |||
| 7824fa431e | |||
| b4919cb520 | |||
| b143019005 | |||
| 5176dc2310 | |||
| 460b844360 | |||
| 3c3108972a | |||
| 539e2281cd | |||
| bacaab1629 | |||
| 167a0d8f87 | |||
| c9cf337772 | |||
| 8940d315aa | |||
| 2fdba73a99 | |||
| dcb5e18c9e | |||
| 07c54413ac | |||
| 5dfd407b37 | |||
| f49a3453ca | |||
| c62b01d0b0 | |||
| e03a9cc0cd | |||
| d1fa349e78 | |||
| dc67da0182 | |||
| 8088ca4185 | |||
| 5929f86ebb | |||
| 857d4e1c87 | |||
| 9193188276 | |||
| af2c36793f | |||
| 9a35a7b9e1 | |||
| 9603ef890a | |||
| fabe17a726 | |||
| 6affd9cd7c | |||
| 4aa13224a5 | |||
| 3ff443a6d9 | |||
| d13021e35f | |||
| c608b8fc93 | |||
| 0b3d092f63 | |||
| 8714b964ee | |||
| 404d925384 | |||
| c63bfc3023 | |||
| 55451c66ce | |||
| 7adce8b532 | |||
| 84bac652f3 | |||
| e42869b091 | |||
| 8f915c450d | |||
| d99f11e898 | |||
| d68d6665f9 | |||
| 68d53bc717 | |||
| 0963a2508b | |||
| 00f6ba0e7e | |||
| a73b1d59a3 | |||
| 88f50a1e89 | |||
| 9fea71b465 | |||
| 38dbbc2640 | |||
| 03db591047 | |||
| 0b774074a5 | |||
| 015829e6c4 | |||
| 1cf148a6aa | |||
| 9f0646a555 | |||
| de9255de27 | |||
| 6451ad0471 | |||
| af2aac51fc | |||
| 58022e41b8 | |||
| 6fc0454b2f | |||
| 0623f08e99 | |||
| 62ba64b90a | |||
| 867316670a | |||
| 192aa04783 | |||
| a077f710f3 | |||
| 2faa09530b | |||
| ac224dee90 | |||
| af45ec0a16 | |||
| 4b6a5a7caa | |||
| 17a55534f5 | |||
| edf7772826 | |||
| e724246935 | |||
| b7b729b38d | |||
| d61d747627 | |||
| 4d9b76a80f | |||
| 8d28dba35d | |||
| f67dac97bd | |||
| d685e330b5 | |||
| 4b0e7ded1c | |||
| f04f549bae | |||
| 3416bba7c7 | |||
| 6e4bc67099 | |||
| 7d4fe85ef3 | |||
| 06c28cd0fc | |||
| f0a2a82ab4 | |||
| e45e756d22 | |||
| 9850e6ddab | |||
| 75bbf20bce | |||
| 89159651ba | |||
| d8222be57e | |||
| 814de8fac7 | |||
| 3d7baef114 | |||
| 50a56bedb6 | |||
| d2d8822604 | |||
| 28aa438cd2 | |||
| f8b2574416 | |||
| 767e6b5314 | |||
| b4698b7ef2 | |||
| 2eaaf17a0b | |||
| 796162c512 | |||
| 9d73b92269 | |||
| 33687a3f61 | |||
| 003a0cf8cc | |||
| 357f281ba2 | |||
| de5f86e59d | |||
| 3d57404464 | |||
| 6b7d6f848b | |||
| 876d9a32c6 | |||
| 42baa58f90 | |||
| 71a5ed3433 | |||
| 1fe1e3caa4 | |||
| 9e8d7066e6 | |||
| abf691aac0 | |||
| b687af0b36 | |||
| 527ab894e5 | |||
| aa30cd4f3f | |||
| 9bf72ae564 | |||
| ecc05f8c1e | |||
| e30ceae07b | |||
| 2f424d7979 | |||
| e69feab8a1 | |||
| b191d7db44 | |||
| 26a06814a1 | |||
| 6f72e71f97 | |||
| 5de2a6d5e5 | |||
| 4ddd9de9d3 | |||
| fe34486f12 | |||
| 7bbdfd7b24 | |||
| 29294b0e68 | |||
| 12ec7f0c20 | |||
| 6397b7f008 | |||
| 3658488ff7 | |||
| 9728f1134b | |||
| 1f2c00d671 | |||
| 3cb9309024 | |||
| 847e5691a6 | |||
| 389bdba618 | |||
| b455ad0a64 | |||
| db4d765249 | |||
| 2aa0cc2c2a | |||
| 21bd3be172 | |||
| 1c460a5273 | |||
| 8aa8513f71 | |||
| 3cf01b2060 | |||
| 2acedf4721 | |||
| a7920065f2 | |||
| b7b81d9344 | |||
| 40ed18ae15 | |||
| f69589d1bc | |||
| 167aa76cfa | |||
| ffad4f1373 | |||
| 2406dbdcfa | |||
| 21f7e81b6b | |||
| cf43200861 | |||
| db13634183 | |||
| c618ab4fab | |||
| 5777c3cb3f | |||
| 8cfae44093 | |||
| f2d2880bbb | |||
| aea7b23b57 | |||
| a8732e09bb | |||
| 0f2c738207 | |||
| a574de302f | |||
| 939a65aba7 | |||
| cf9e7cb079 | |||
| 45e3d6496a | |||
| ea0eb15649 | |||
| 5ba0c332b6 | |||
| ebb649a4e3 | |||
| a2789adddf | |||
| 3d764fe860 | |||
| 3d3c7d4213 | |||
| 22a0769933 | |||
| a6c9643ce7 | |||
| 43f146208e | |||
| 46d2468695 | |||
| ca3df9f0cf | |||
| 17d0290e57 | |||
| d712ebd86d | |||
| 4e244b8817 | |||
| 918a06e25d | |||
| 9cf4a8b456 | |||
| 5b1ad0eb73 | |||
| bbbc5c15d4 | |||
| 8a58809312 | |||
| 130e154291 | |||
| 2922e394e3 | |||
| 52d516c3a9 | |||
| 728c5e82cc | |||
| 770a1275d3 | |||
| 466af1a356 | |||
| 21741e8c7e | |||
| d765717c76 | |||
| 80ca924709 | |||
| ba6815e824 | |||
| c2393cad08 | |||
| ee3be05310 | |||
| 8f76dc8e5a | |||
| 41d47db90f | |||
| 569a97adb2 | |||
| c94f7a1cce | |||
| 380280d994 | |||
| 96ae83a0d2 | |||
| 65b885027a | |||
| 81a73fa638 | |||
| 2958b55fe5 | |||
| cf11493dce | |||
| 79743cedab | |||
| 291c5e9b25 | |||
| 65d7b21b77 | |||
| ef3e25ce4e | |||
| a3975f94f3 | |||
| 7f8b909189 | |||
| 8c8744a94a | |||
| c045249049 | |||
| 364ced6893 | |||
| 273f5ba026 | |||
| ba71d9e94c | |||
| 786b9cf5ca | |||
| 4eea25b445 | |||
| 662751b4e2 | |||
| f76fb3aeea | |||
| 71b19ee251 | |||
| ab96bf0294 | |||
| 83eda6435e | |||
| d51296d9c2 | |||
| 6a6225beab | |||
| 5d02e6bd20 | |||
| 436dc779a5 | |||
| 125516977d | |||
| dee673232b | |||
| e1eb3efd02 | |||
| b3bbe1bdb6 | |||
| b92abfa6e0 | |||
| f82ee109e6 | |||
| ca26699f37 | |||
| 9088fcae82 | |||
| b2846afda8 | |||
| 6d6b7c923c | |||
| 0c65fb7cfa | |||
| eb5b5ce641 | |||
| 42017d82ba | |||
| f93509b114 | |||
| 5f26a23d03 | |||
| b203de7c86 | |||
| 4f05bbf165 | |||
| 996f127a90 | |||
| d3cbc997a2 | |||
| 91f4c84a19 | |||
| 3335724376 | |||
| 366a8ca09e | |||
| 69ee46243c | |||
| a0c0a78233 | |||
| 627f44799a | |||
| 650a71e157 | |||
| c34a525d2f | |||
| b4d4d6fe87 | |||
| 9a50cb6195 | |||
| 1a8f61110e | |||
| 51ae566511 | |||
| e02a8065e0 | |||
| 7f91950901 | |||
| 431b04d8c4 | |||
| 006da469dd | |||
| 188a8bfccc | |||
| 94056b57be | |||
| fd6970bc56 | |||
| 843fdf2e42 | |||
| bbfb9fc22b | |||
| dbc12269ed | |||
| 6f8a02844a | |||
| ef0c380c12 | |||
| ef42c2c487 | |||
| 312b104ff6 | |||
| fc6c8b0eaa | |||
| 17083b9b84 | |||
| 40082d598b | |||
| 77412343c8 | |||
| 1b9c352e55 | |||
| 01734dba84 | |||
| b369e507aa | |||
| 516dc6305f | |||
| c8f2c5c56e | |||
| 3341bb41cd | |||
| 57ffd8ab4c | |||
| 83b38fbea8 | |||
| 510ad0a8b8 | |||
| adb0760b5f | |||
| 3b74889e8f | |||
| 5eeb556484 | |||
| 90e8263d91 | |||
| 78b7debf56 | |||
| b6933d76d2 | |||
| b0a78091a5 | |||
| e3ee45aa54 | |||
| 441658dd6c | |||
| ca7eb27ed5 | |||
| fbe0178f08 | |||
| c4e32e206f | |||
| ee4bc07474 | |||
| 56b8d49ddf | |||
| b53004fdce | |||
| 3a08dc63fd | |||
| 2a16d8b275 | |||
| a0bd464776 | |||
| ce31e3c8bf | |||
| b61d5b47f6 | |||
| 4b6aecb48e | |||
| 3ff89f29f5 | |||
| 805db1fe13 | |||
| 9ade58f055 | |||
| 4baa34c18f | |||
| c6c6658499 | |||
| f31a510bb3 | |||
| 2b0c924568 | |||
| bcedd0a471 | |||
| 85e3d7b6a0 | |||
| b8648290d2 | |||
| f9426eeb94 | |||
| 92601d2eb1 | |||
| 78941b9fe5 | |||
| 9884862383 | |||
| 95cf3725b4 | |||
| 487f132a6f | |||
| 549e5f9f23 | |||
| 9062d1bab2 | |||
| 849367ccf7 | |||
| dfeb5aa6a9 | |||
| b6865b9bef | |||
| d337631b91 | |||
| c2c99dc7ef | |||
| 0bf34b1c9f | |||
| 4d0ea3d269 | |||
| 521a8ffa53 | |||
| 4893d919f1 | |||
| 9b435204b1 | |||
| cf7baf4060 | |||
| a0e7332839 | |||
| 88399476c3 | |||
| 27b66bea01 | |||
| d65b14ed67 | |||
| 614e191c4d | |||
| 1933231a0a | |||
| a4908da04e | |||
| e28fff18b8 | |||
| 9435cc6670 | |||
| 3042c63a95 | |||
| 0083b149e9 | |||
| 8b129030cb | |||
| 4331923b97 | |||
| d0b5002378 | |||
| 00bc6e2067 | |||
| 304aacac90 | |||
| ba0dc54576 | |||
| a72b82ebe6 | |||
| 20ac86c6f1 | |||
| 4c2b4c4c3c | |||
| 6dc2474727 | |||
| 4e1522d65a | |||
| d95045717e | |||
| a0ae2310ec | |||
| 5427250351 | |||
| 81c1910c86 | |||
| 0a570dbd2e | |||
| d4d628462f | |||
| f0f5e28f82 | |||
| 60f9649653 | |||
| 073baf7f22 | |||
| e4a97f82bf | |||
| 7701716efc | |||
| 8f20e61c85 | |||
| 345a1371d8 | |||
| 503e8c8b32 | |||
| 6e32959329 | |||
| d6f1da6b71 | |||
| 74c55ab9e5 | |||
| 69f2d5386b | |||
| b5f06d6c59 | |||
| 3f6a4b5bd7 | |||
| edb6d950cb | |||
| 84097f6d38 | |||
| 093be36f6c | |||
| 975159bb61 | |||
| 2fbd6df81c | |||
| df017c3ccc | |||
| 137eb8e663 | |||
| 3d3204c025 | |||
| d04ec99bec | |||
| 4d10de55b4 | |||
| 7579a52b55 | |||
| 5166c30e29 | |||
| b950c38565 | |||
| d00997e66c | |||
| eddf9eeca0 | |||
| 5600e6f3ba | |||
| 874c7caf19 | |||
| 587a19c725 | |||
| 3d852da2db | |||
| 75444551c0 | |||
| d03d8c720f | |||
| 64ec802e50 | |||
| 3db2e40422 | |||
| 1e1cb6f8e5 | |||
| 9fdf158aa0 | |||
| ec93b895c1 | |||
| 3080fb714f | |||
| 435abb22cb | |||
| aab14120d4 | |||
| 397720fb14 | |||
| 8a817e1eca | |||
| 515d6a551e | |||
| 5764e67cee | |||
| f143037789 | |||
| e5f3487190 | |||
| 6dc0a849b7 | |||
| 3b61d2890d | |||
| 91d6a593f1 | |||
| 4116d1ec75 | |||
| 10dd3a7d1c | |||
| aa43a76538 | |||
| aa4316757d | |||
| d50db469c0 | |||
| a438a0941c | |||
| 4cfe328bae | |||
| cb47293eba | |||
| 2da73f6302 | |||
| 4060d6857e | |||
| 474bf508df | |||
| 898efca72a | |||
| a8aad0ec93 | |||
| 06bab00338 | |||
| 648bd5a8aa | |||
| 5f97bbc124 | |||
| 4603fe9b1f | |||
| 337225ec1c | |||
| 6bd8ae2640 | |||
| c582e8aad0 | |||
| 84a6570e7b | |||
| 5bb4ec6233 | |||
| 5f09219400 | |||
| 5f9b825c89 | |||
| aec10d162f | |||
| 78cda46f17 | |||
| 90247d3e01 | |||
| 1ebc1dee92 | |||
| 42288269c3 | |||
| ac2bc50a10 | |||
| dacd34568d | |||
| 03462875cc | |||
| 50caa20628 | |||
| e13d6ef7dc | |||
| cd3e0211a6 | |||
| f8c43c9425 | |||
| 5269718cb7 | |||
| ea7b0a539a | |||
| 4d2c52e830 | |||
| 2237127a6c | |||
| 626c1b8af1 | |||
| abbc96a214 | |||
| 18c894814e | |||
| 76d24f1a83 | |||
| 28f26c107b | |||
| fb3aa06cb6 | |||
| 20e54e49fa | |||
| 895ae3b5c4 | |||
| daf53241d6 | |||
| 06e737fbaf | |||
| c8df3900c8 | |||
| 53c710d17b | |||
| d2ffc3fc48 | |||
| 9af845afc2 | |||
| 66b15efb20 | |||
| 390e121fb5 | |||
| bfb3925fcb | |||
| a6752a7d3c | |||
| 410b61ad7e | |||
| 9dfd6a4baa | |||
| 90ce374d14 | |||
| d85bf95436 | |||
| 656d41ab4c | |||
| 32b08742a5 | |||
| 4def2fe969 | |||
| 7df1343292 | |||
| 8eb38f638d | |||
| 95e7057507 | |||
| 89087597ba | |||
| 7ade6ef7d4 | |||
| 51007976ec | |||
| 888c4a2ae0 | |||
| 50f82e1282 | |||
| ce06e4780e | |||
| 9858195481 | |||
| 10fab90fe2 | |||
| 1306b7d3ae | |||
| d87ef00c31 | |||
| 370f0ca18c | |||
| 523ca4e016 | |||
| 17503b00ea | |||
| b76e6ebd44 | |||
| 5a71977b8b | |||
| fe1f5a639d | |||
| 1b1867d86b | |||
| ff73deeb0e | |||
| 06b05d4575 | |||
| 0224aaf67f | |||
| 28c19ab58d | |||
| 4c01231e67 | |||
| 151425ddb2 | |||
| 6daa9cb515 | |||
| f74b40208d | |||
| 14fc1a2467 | |||
| 3876fc6839 | |||
| 98597725f1 | |||
| 870d91fb89 | |||
| e0921c6b53 | |||
| 656e869a45 | |||
| 6db23af50c | |||
| 3f96e0b4e4 | |||
| f33419261a | |||
| b1b3dc3e52 | |||
| 117a0f6afa | |||
| fc1ba6fd11 | |||
| 14d5b2b645 | |||
| f2cc8ffdaa | |||
| 1de8ce9ee1 | |||
| d59034ff6f | |||
| ee8e80a060 | |||
| c7ec71baf5 | |||
| ed67286465 | |||
| 6a02e98074 | |||
| 09a9888fe9 | |||
| d0b83fe2e1 | |||
| fa01127a67 | |||
| 321b0908dd | |||
| 2c22bc79c2 | |||
| 12d51db243 | |||
| 48706c7178 | |||
| 0aa1153ffb | |||
| 1670be4bde | |||
| 1564189298 | |||
| e577bd0f13 | |||
| 176ceff91f | |||
| 126eafe396 | |||
| 12f1a3bb3c | |||
| d5239bab5b | |||
| f49b0762a1 | |||
| 4861c25817 | |||
| 2a91a9ef66 | |||
| 0684284911 | |||
| 6c640f098a | |||
| 861ff890d6 | |||
| 11fd2c773b | |||
| edb704b26e | |||
| 48fbd8fa2e | |||
| 900677487d | |||
| fc5b7419d4 | |||
| 5f3ea66bc0 | |||
| a515d0a77c | |||
| 98268b2e76 | |||
| fa2bdffc5d | |||
| 28fcf00607 | |||
| 871598be55 | |||
| 00b5887b94 | |||
| ad5e9b6c6a | |||
| 1905384fd5 | |||
| 41a2f3529c | |||
| 159ff3342c | |||
| c14d31294e | |||
| 4169dc84bf | |||
| a17841ac49 | |||
| 80d1319e1b | |||
| 4c33a0c4fc | |||
| d7a4f5becc | |||
| cab048fb35 | |||
| aecbcb3680 | |||
| 4e441e529c | |||
| a60010566a | |||
| 9419f144ad | |||
| a55a822adf | |||
| 7d25c9c81e | |||
| 1194c3e315 | |||
| c0f99b4d2e | |||
| 9eae4aa576 | |||
| 559a45d1dc | |||
| db803b6919 | |||
| c612628045 | |||
| 3a9464bd30 | |||
| 6fc44656b4 | |||
| d143087d18 | |||
| 516077b3b0 | |||
| da68fd691c | |||
| 0fe6c6bdca | |||
| d5de578c22 | |||
| 165dd6dc91 | |||
| 349e1242d9 | |||
| 11426641dc | |||
| 4d7a5b5ba3 | |||
| 228792a9dc | |||
| f0aeb1be17 | |||
| 154c6bb7ac | |||
| c15f937581 | |||
| cd73b9a8c1 | |||
| 2194943a34 | |||
| 4c295a265b | |||
| 97440e9c75 | |||
| 173193ccd0 | |||
| 5e89a435c8 | |||
| b844f8a9ab | |||
| 55dae94c0c | |||
| 8894b81742 | |||
| 9b494a1537 | |||
| 8252e24a77 | |||
| 33f4cb1093 | |||
| fab1de72f1 | |||
| 8d9c3836be | |||
| b29fd6971d | |||
| ae5fc2db87 | |||
| ed57c979b9 | |||
| 32ff06403d | |||
| 3ec7a47664 | |||
| 19ade2426a | |||
| 057e1d7473 | |||
| f02e3a2b18 | |||
| 738944c9ee | |||
| 53155b520d | |||
| 0e708178ed | |||
| f6b80a0139 | |||
| d324b70f00 | |||
| 7dcd8703ef | |||
| 5506d04969 | |||
| 03966cacf9 | |||
| 66d1eee682 | |||
| 8cfc6678da | |||
| 204737fcc5 | |||
| d5c2c71c0f | |||
| c746eb1603 | |||
| cae78c46d6 | |||
| cfab34e188 | |||
| 500fce073b | |||
| a0cbbba31f | |||
| 88dae78f4d | |||
| 3a7f5fa9d2 | |||
| 6587125c0a | |||
| 01203475c9 | |||
| 57f25f4b7f | |||
| 0fa46524ac | |||
| b79607656b | |||
| c0fa2aa0b8 | |||
| e8cc02555e | |||
| a92e0ad2e2 | |||
| 502fec779b | |||
| ec9b18f62d | |||
| aef488c503 | |||
| 59b9351b78 | |||
| 506e7c6361 | |||
| 053c2153f8 | |||
| 5a9eb31477 | |||
| ff20f9cf36 | |||
| 61f79b2986 | |||
| 80e3b36361 | |||
| ef28df0572 | |||
| 73fdc8c5b4 | |||
| 8b05ace014 | |||
| d62e7d8842 | |||
| f48d3314e4 | |||
| 0f68a7f408 | |||
| fd3eb3e3cd | |||
| 12febc20db | |||
| 5fd4e3c87c | |||
| 48bef3a734 | |||
| 4e94c6c008 | |||
| 8e6c34b390 | |||
| 4ccaf268fb | |||
| 8472a224fb | |||
| 0558914dff | |||
| 0dcb46e7a4 | |||
| 89a0a9eace | |||
| 5990743fdd | |||
| d35f729649 | |||
| 67c2dbdb54 | |||
| 86c7931a70 | |||
| d0b942d1dc | |||
| 48327c5718 | |||
| 5a2b77a6c1 | |||
| 330d8b991f | |||
| c07a02a4b7 | |||
| 7bd8650512 | |||
| fb0a38b4f2 | |||
| 8ac29fe090 | |||
| da005253b8 | |||
| 786092a35e | |||
| 43efd7cb13 | |||
| 89f0fda5d3 | |||
| cf0af9a31b | |||
| c4bf6f38bd | |||
| 466144d440 | |||
| a48310de47 | |||
| 60d51ef512 | |||
| cf601b902f | |||
| bec075612a | |||
| 3028b20a71 | |||
| 074490b2c2 | |||
| 314cdf7c25 | |||
| f251441387 | |||
| 675d2a5a00 | |||
| 00934026a4 | |||
| 42f8f76402 | |||
| 53218671d9 | |||
| af1c864cdc | |||
| 33d033d694 | |||
| 97a3d16a69 | |||
| 5110e5748e | |||
| fb366b9a2a | |||
| da3ba3a167 | |||
| a88a4dae19 | |||
| 4c5c0af7e5 | |||
| 464d420775 | |||
| 0041be5b3d | |||
| 09922da4a7 | |||
| 52a57f7c7c | |||
| 1485bd9c02 | |||
| 1c4a9acc73 | |||
| 7c4999e495 | |||
| 16121bae5c | |||
| 42ad693b7b | |||
| 737681477c | |||
| 7b0e2cfdfb | |||
| f7329751fe | |||
| b7036f4912 | |||
| ebdb185bef | |||
| c52c5282ef | |||
| 085bf5c1fe | |||
| c6318c3788 | |||
| 3b22bfbc6a | |||
| b45192ec47 | |||
| 7f5ad6c35b | |||
| ff88703501 | |||
| cdddfbffa1 | |||
| 6c2ad00c46 | |||
| 2beabd24f0 | |||
| 101a6cd276 | |||
| ba9e0191de | |||
| f780557a34 | |||
| 3a35937ede | |||
| 618697ef53 | |||
| 5b85add7d5 | |||
| 1c801d65eb | |||
| e16cbe88ae | |||
| d979cf6efd | |||
| 987972377d | |||
| 54ee56b15b | |||
| a096eaca65 | |||
| 6cb5132a7f | |||
| 8def252de2 | |||
| e61081e725 | |||
| ef74e7e783 | |||
| c1db6a3bab | |||
| 6652e7da0d | |||
| dd3a0580a6 | |||
| 0768c5e274 | |||
| 4c14c1f47b | |||
| d0876a095f | |||
| 0c883766bd | |||
| 102b5ff4a8 | |||
| 32e3466d38 | |||
| b90fbc7e0b | |||
| 2f320661f3 | |||
| 499770c088 | |||
| bdec2768bd | |||
| 2f4cdd97f5 | |||
| a70da86b84 | |||
| 419d979f7f | |||
| 7014fc360d | |||
| ade26bf991 | |||
| a3fef89b26 | |||
| eee195b3aa | |||
| b9273353dc | |||
| a9bd5df16a | |||
| 1a5fc300f4 | |||
| 6d9031f285 | |||
| 7a2b915e92 | |||
| ab81d31d20 | |||
| 8434cb878e | |||
| ec24132b6c | |||
| d0c19b3303 | |||
| fdf8409656 | |||
| 04bfac83b7 | |||
| 90a7c95496 | |||
| 923110b74f | |||
| 684774306d | |||
| 81cd655cab | |||
| 1a77a1a86f | |||
| 2055d737ad | |||
| 3ec8171bed | |||
| 1cbac6867b | |||
| bcc8d30aff | |||
| 998395061b | |||
| 6192549c1f | |||
| b427b263e2 | |||
| a5392ee747 | |||
| de81adf978 | |||
| edea08a6b0 | |||
| dfe9a31973 | |||
| bbd949970d | |||
| 4130e70367 | |||
| c1f85598eb | |||
| b338414e61 | |||
| 8abe4930d3 | |||
| dde718e7a6 | |||
| 2156662dea | |||
| d128f2ffab | |||
| 7c39318136 | |||
| 9402788b34 | |||
| 99c5c6079d | |||
| 10bcbcae30 | |||
| 95408e9953 | |||
| eec46b4f75 | |||
| 4063fd9cba | |||
| 5b28b78332 | |||
| 31e3c6c393 | |||
| 0ce5236dd1 | |||
| de496ef08b | |||
| 4a545d18e2 | |||
| 451263b841 | |||
| 4f84dedc03 | |||
| f2a2616b74 | |||
| 5d8efc79db | |||
| 9474abdf47 | |||
| 64d95c44ec | |||
| f3c75f8b44 | |||
| 934d0b8bdd | |||
| 0bb17295f0 | |||
| bc33fbf956 | |||
| fcf813417a | |||
| 699a2293cc | |||
| 6feb39b43c | |||
| 6386eb9721 | |||
| f12c74f51e | |||
| f932ee61b9 | |||
| 003a7cc608 | |||
| 718e9d777f | |||
| c5fe06c59d | |||
| 82aac00e0f | |||
| 956ae62139 | |||
| 8c40ba73d8 | |||
| d4306daea1 | |||
| c5a1ff9ef0 | |||
| 02a77fa04c | |||
| b05e0bec88 | |||
| fa9d2ad7ec | |||
| c82bd37169 | |||
| 99a62347fb | |||
| e407b5a323 | |||
| dcec3277cd | |||
| 37e0974afc | |||
| 9f5bfe1b99 | |||
| db979f7588 | |||
| b2a41d2be4 | |||
| 88e5c51a15 | |||
| e6de918676 | |||
| 1325459105 | |||
| 8e5a1b2abb | |||
| 6bf885375a | |||
| 99ba36e72f | |||
| 50a8ed3ee0 | |||
| d9e28d91a8 | |||
| b405b62f4a | |||
| 7e6dd664e8 | |||
| b6f47b5393 | |||
| fb76994c41 | |||
| 648d0deb1d | |||
| c87654dca1 | |||
| b48c7f7b3f | |||
| edbb37f736 | |||
| 3412f5979d | |||
| c256bc6d10 | |||
| 633e5e89f7 | |||
| 43299c63ca | |||
| 89359e4c63 | |||
| 36ee128375 | |||
| 4edfd2d4d2 | |||
| 269b054939 | |||
| f7c618e3b0 | |||
| 45e11091e5 | |||
| 1d3a1cc44b | |||
| 53735d7c3b | |||
| 3eba1dd27e | |||
| 571dd693b5 | |||
| 9c1d59882b | |||
| 72787c5b68 | |||
| 619d831848 | |||
| ebd5258975 | |||
| f71873c5fc | |||
| 72e9ca7519 | |||
| 5e6cd51bec | |||
| b599b19289 | |||
| 44e3e3fb49 | |||
| b29e2dcaff | |||
| acfb714bdf | |||
| 871c31a6f1 | |||
| 4cb5ffa93d | |||
| 7f4f8b97d0 | |||
| aab895c396 | |||
| 6ca844582c | |||
| 31fa2b6c68 | |||
| eec76042f4 | |||
| b8de7e448e | |||
| ae9230af40 | |||
| 2d506ea4c4 | |||
| 4fe744f528 | |||
| e07a3d95f8 | |||
| 50db741417 | |||
| 50644cf624 | |||
| a9dd124346 | |||
| c7f3abc257 | |||
| f95f60c829 | |||
| 92dfceb124 | |||
| 7811bf7e73 | |||
| 0c7f93f5f1 | |||
| ebf84f07ba | |||
| 831f3144a6 | |||
| c51dc4f927 | |||
| cc44e72d14 | |||
| 2ea1ef9090 | |||
| ba2a5f13f7 | |||
| a36983653e | |||
| 3c0ce60855 | |||
| 9ddf4f4f03 | |||
| 3dae0d7b4f | |||
| 59c1d5b96b | |||
| ba0e370dc1 | |||
| 440f39754b | |||
| 087436c98e | |||
| c8545d2a9c | |||
| 75bd49ff88 | |||
| 14f33205a7 | |||
| 13489248fa | |||
| f7ca656f07 | |||
| 279008adc3 | |||
| 4446b6b094 | |||
| 633062639b | |||
| 04d90ac49e | |||
| 0ffa22f9f6 | |||
| aa3787c8f0 | |||
| 1d4b797852 | |||
| 78a93d17c0 | |||
| 36a6a1adb6 | |||
| ff143ae10e | |||
| 448e050b0d | |||
| 064f374874 | |||
| 619d51e01f | |||
| d913f4aa40 | |||
| 82e61f3445 | |||
| 09127c5713 | |||
| aff87da15b | |||
| 2f2b19ff40 | |||
| 354b338316 | |||
| b19d64d852 | |||
| ee6e71e29c | |||
| 24b930ad1d | |||
| 5e8c8eb5ba | |||
| df06fb1f0b | |||
| bb5a2f2fc3 | |||
| 78a53d59cb | |||
| 03aaac3502 | |||
| 4c6346cc3e | |||
| ed6ceb7649 | |||
| 4deaa534f5 | |||
| c40e3581c7 | |||
| deafc24388 | |||
| 8b3db33a76 | |||
| 4194e5f42b | |||
| 49ab16239c | |||
| c9a0671477 | |||
| f56174ac5b | |||
| c87bbe1ff0 | |||
| 011cc17a81 | |||
| 2840272c5f | |||
| 7735e0406f | |||
| 7f1cdf1895 | |||
| 8a4c319d33 | |||
| 087fd5f368 | |||
| 005b515754 | |||
| bb6a664e14 | |||
| a8eb4f79f9 | |||
| 3668ec1716 | |||
| f16d29b337 | |||
| c236a62172 | |||
| 6b0257de42 | |||
| b0f0086fa4 | |||
| 61d7fec87a | |||
| 0f96c26de6 | |||
| 96d4fa46ed | |||
| fcfd4ec789 | |||
| 212c42a1e3 | |||
| 61abe3290b | |||
| 751f17aa48 | |||
| 9d1116e995 | |||
| 1567bef3b3 | |||
| 7a5533b2c3 | |||
| a0e69a9375 | |||
| 7bac51837b | |||
| 3499c49c17 | |||
| 0c9c8472e6 | |||
| 40ca13367e | |||
| fc28c006a6 | |||
| e3d832ff87 | |||
| 762dda44de | |||
| 26ef0f1991 | |||
| 7bce804260 | |||
| bad8300837 | |||
| aaf6795f92 | |||
| d3b1adf59f | |||
| d4ba6e1a0e | |||
| 317282927d | |||
| 22888d3082 | |||
| 68b21b37ea | |||
| c6f163c786 | |||
| a81fe4e1df | |||
| 13e03e619d | |||
| 41fa672df1 | |||
| 8c5026628a | |||
| 56b03c96b8 | |||
| cbecf121cd | |||
| 5987e0ab69 | |||
| 101b9a7eb1 | |||
| 68eff4036d | |||
| a27074abb5 | |||
| fa4bdb0a40 | |||
| edc1e734bf | |||
| fd5320bb57 | |||
| 1210c72e82 | |||
| 92487f5d0b | |||
| dee4d72e72 | |||
| 1666c42f0b | |||
| 24273268b7 | |||
| 93ed89bf40 | |||
| dcb5e01197 | |||
| dd7429d645 | |||
| 4be75e9728 | |||
| 3baa407f92 | |||
| eb6c59bc78 | |||
| c836f77266 | |||
| 75a208ef66 | |||
| b47a16743b | |||
| 862e8e4f4a | |||
| cb56590111 | |||
| 557125637d | |||
| 3b7ed25da9 | |||
| 4f831e661b | |||
| e2ec3089ce | |||
| 2f5507580b | |||
| 9e40bba6ba | |||
| c88b11c591 | |||
| b20147a3c8 | |||
| 51c3f42d8e | |||
| b0d539ccad | |||
| adb2503ea3 | |||
| 5b72b3412b | |||
| 129011c20b | |||
| 21a2d900ec | |||
| f83942684d | |||
| 97d3390fc8 | |||
| 23c146c38b | |||
| 04b2f13c37 | |||
| 2020ac4bd6 | |||
| 1efe9c0b24 | |||
| 7927732ff8 | |||
| d7f1e7c009 | |||
| b31cee6727 | |||
| 0d33381fad | |||
| 3a726777ca | |||
| 17109ecfb8 | |||
| 2edf9a857b | |||
| e69f9715eb | |||
| c35bb6de54 | |||
| 9960506cbe | |||
| 06d940efc3 | |||
| 8ea994d3c5 | |||
| 98d5b72727 | |||
| fe616f35c8 | |||
| 1d9c26a4b8 | |||
| d3046dad80 | |||
| e024cd715e | |||
| ca905ba28e | |||
| cc1d0685b3 | |||
| 5b67ab9924 | |||
| eb1771ef1f | |||
| 7e51a441e4 | |||
| a3034c7004 | |||
| b9af152efb | |||
| 67d074874d | |||
| 571fa585b6 | |||
| 479322bfaa | |||
| 9e7f84a556 | |||
| 8a303f527f | |||
| 28ec07d8ad | |||
| bbe98ea9c3 | |||
| 8581fbaa6d | |||
| fa0ae17958 | |||
| 1e4cf8bb44 | |||
| 12eb528b5a | |||
| 5b49376202 | |||
| cc8407522a | |||
| 35f93f299f | |||
| 6f79d26442 | |||
| b7bb2b59f7 | |||
| 10056d898e | |||
| baf4bacb1f | |||
| 3b9a1dc132 | |||
| 4435c7f52c | |||
| 4943331015 | |||
| e215e6ded2 | |||
| 182afb7dc6 | |||
| 7dbee87e09 | |||
| 5ac1c7ea85 | |||
| ae31831879 | |||
| 0db5d911fc | |||
| 59d5edef34 | |||
| 31c351c4d3 | |||
| 833174c929 | |||
| 6c62cfb2ef | |||
| e4bacf6614 | |||
| fb13a7df95 | |||
| 197e7ce911 | |||
| 0df802822c | |||
| f726d53ea3 | |||
| 3560ae6d94 | |||
| f21af26279 | |||
| ea55bd86b9 | |||
| fbee82951f | |||
| 6a3d1a98e0 | |||
| 0a75717602 | |||
| a6d8a149a8 | |||
| 145bf41c13 | |||
| 8298e4ec02 | |||
| 67a3920d85 | |||
| e006ab51ac | |||
| 53d374f1b9 | |||
| 0ae8dc0adf | |||
| db572b3854 | |||
| 92ce53aab8 | |||
| e5db7051a8 | |||
| 3fadb4b211 | |||
| 8d580779a3 | |||
| 77db257e2a | |||
| 65b5035a1d | |||
| 90cddfa824 | |||
| bc44e947f3 | |||
| 19d67bfecb | |||
| 6342427353 | |||
| da2a4d95a2 | |||
| 074d6b75fd | |||
| c21298a69b | |||
| d31497b196 | |||
| 98d40fed3a | |||
| 623346ab18 | |||
| 5451f8896c | |||
| 98d88b23f5 | |||
| 95be242adc | |||
| 914e5009fa | |||
| 14d989a91d | |||
| 42b60f8b02 | |||
| 6eb3c66a96 | |||
| 59611a0f3a | |||
| 7a2e13204f | |||
| 96addecff8 | |||
| f3a7befffa | |||
| c749bd405e | |||
| a582cfce3c | |||
| 73a2ff6974 | |||
| 8f3b4a1d5b | |||
| 0dff407d71 | |||
| e5eb3e22ea | |||
| 36b668fa06 | |||
| 938f437c53 | |||
| 7d2a5fa749 | |||
| b225ee6ea0 | |||
| 2b8feffad5 | |||
| 449df41f01 | |||
| 857bad6e53 | |||
| fd0ef8b66d | |||
| a01dd3818f | |||
| 31336dcf3f | |||
| 4e41b87e3d | |||
| d18a1cba24 | |||
| 6f3faf3863 | |||
| 140c6edeb9 | |||
| 3a6e4a221c | |||
| 39799fbf85 | |||
| 238449414f | |||
| 015443f42b | |||
| cc714d74c4 | |||
| 63b204eadd | |||
| de2d793e83 | |||
| 8788fd0ceb | |||
| 255257f3ea | |||
| f83135eb76 | |||
| 99e7905422 | |||
| efdbad56ab | |||
| de1ca3a0c5 | |||
| 1f981215dd | |||
| 14d058b940 | |||
| 94a7edd938 | |||
| 9286039c2a | |||
| e2e393c6f2 | |||
| f424b09410 | |||
| 767939af52 | |||
| 67316444b0 | |||
| 071529bd54 | |||
| f0fc791298 | |||
| bde7378bf0 | |||
| 7119bb052a | |||
| fd5cdaeea6 | |||
| 9e86c4e193 | |||
| d8415ba42e | |||
| c18b4fbe9f | |||
| b80b2218b5 | |||
| 275ad9d80a | |||
| 2218dac5d2 | |||
| e2bd7f80d0 | |||
| 354ea44340 | |||
| 1eda4a4102 | |||
| cf1a1eed70 | |||
| 5603f78fc4 | |||
| 929111698c | |||
| cb6b56859a | |||
| eaace0c668 | |||
| 6e4d3f0859 | |||
| 66459ce319 | |||
| 96b2b2de12 | |||
| 91ff7efeeb | |||
| c8d719ff7e | |||
| e1cd78634a | |||
| 4e730b3873 | |||
| 7fd902d335 | |||
| d54d7598bd | |||
| 7fc1cb150c | |||
| 142ad1a1cc | |||
| 425ff71c4e | |||
| b0969cafd0 | |||
| 2553363826 | |||
| 7419d807ff | |||
| ef53017520 | |||
| 6ee6993fd9 | |||
| 50540e18ff | |||
| 202d6863ce | |||
| af37d183b3 | |||
| 91c2278b97 | |||
| 5d3cb760a0 | |||
| 5326460f14 | |||
| 1b37fb5e17 | |||
| 862888a358 | |||
| 87208a05af | |||
| 758bd39e81 | |||
| 705e332b46 | |||
| cbaaa2f6ac | |||
| 0b86e330b1 | |||
| b9403e9516 | |||
| e9b4800dda | |||
| 9b42c68f7c | |||
| 4bc18e7a83 | |||
| fc8a93507c | |||
| 0359e2e15f | |||
| 5761ceb35a | |||
| 35920c9715 | |||
| 9b468a7cd7 | |||
| 464c86ac93 | |||
| 1d33f55cb8 | |||
| 5b949623c7 | |||
| 6d67664380 | |||
| 00ba7cadd8 | |||
| c59d71b282 | |||
| f70ee51029 | |||
| 0194665c33 | |||
| 05e72aa0c4 | |||
| 32525428e1 | |||
| 8ad06b7c13 | |||
| 8a17da2f7f | |||
| e1ad188641 | |||
| e15f0d73db | |||
| f4786d7f39 | |||
| defdcd2862 | |||
| 023f51fe16 | |||
| c8849583ad | |||
| 14154f7238 | |||
| 30c12301f8 | |||
| 44caf4f6f4 | |||
| 865da84abb | |||
| 0248810300 | |||
| d386fd646a | |||
| 868d37165f | |||
| 3a9bd972e2 | |||
| d96098c641 | |||
| f3feaf7f22 | |||
| cf028d0c3d | |||
| bb300ac686 | |||
| 25ddd91b24 | |||
| 8896ebb9a9 | |||
| f30bcd5357 | |||
| 3bbc2451b1 | |||
| 0dde58978a | |||
| 7b5e943cb6 | |||
| 7f3dab39b5 | |||
| e5dcceb82c | |||
| 2411f0e465 | |||
| 9edf375834 | |||
| 0fb27dc988 | |||
| 4bbbabcb2c | |||
| a45914193a | |||
| a5327c6a9a | |||
| 488a179ce1 | |||
| fa906a264b | |||
| 125f137562 | |||
| 05b8e25fff | |||
| 4ed89d48ab | |||
| 5db9abde43 | |||
| 15adc24208 | |||
| 056218dab1 | |||
| c8f35a9ce3 | |||
| 7f65d2366a | |||
| f58248b824 | |||
| 95f0dd2123 | |||
| b210c83a78 | |||
| b3a0aad37d | |||
| 41b0564b35 | |||
| 212829ade6 | |||
| b5be744d3c | |||
| e849e5bb4a | |||
| 6767ce71d6 | |||
| 64b6b2b273 | |||
| 8f796960f6 | |||
| 07cde58bdb | |||
| a3c37825cc | |||
| e3ecbaa4ab | |||
| 48d4e147d8 | |||
| d0f324f1e1 | |||
| 9a046cc14e | |||
| f0577df6de | |||
| 7cb596fa22 | |||
| bd9d51263a | |||
| f93c90d217 | |||
| c29bec485e | |||
| 61e068e5a2 | |||
| ff8dcb5efa | |||
| 35a7052b61 | |||
| 1d21471c78 | |||
| bc53fc6265 | |||
| 4f1c9d162e | |||
| 12313838d3 | |||
| bf82c9b74f | |||
| beb24f2a36 | |||
| 480799f718 | |||
| 8fb4d0e4b4 | |||
| b7417bee87 | |||
| 05b736c16e | |||
| 94db82573e | |||
| b91048968b | |||
| 263fd3c4c7 | |||
| d53f329d88 | |||
| 7804177af9 | |||
| 15e17c99f9 | |||
| 9dcc881fa6 | |||
| a6c850e4f4 | |||
| 3b309818e7 | |||
| 926452298d | |||
| 56397471b4 | |||
| 52c9e6af29 | |||
| 292acd71d6 | |||
| f9e977be70 | |||
| b493fee958 | |||
| d7b66d9b44 | |||
| ce85686a1f | |||
| 45da7cec5a | |||
| cd918492c6 | |||
| cd2457809f | |||
| e901914da7 | |||
| 8f09dd89f6 | |||
| 7b0727a401 | |||
| 15c68c67f4 | |||
| a9653400d3 | |||
| 9c9fe89f84 | |||
| e697c912c2 | |||
| 9c6f7485a6 | |||
| 305f41e4de | |||
| 367fdf3330 | |||
| 4fd89e4978 | |||
| a3e8d3cb1c | |||
| 588faad106 | |||
| 375801d5e6 | |||
| 092d4d49dd | |||
| 47c9b22d08 | |||
| 9e6da0a7ed | |||
| 17292440c0 | |||
| 881fa716c8 | |||
| 491a33d138 | |||
| 8637316e5e | |||
| fe65657de1 | |||
| 11c49ed23b | |||
| 0b686a8a1e | |||
| bbcd961897 | |||
| 5f9b2ce0ea | |||
| 5fa0b17c3d | |||
| e35bc46af6 | |||
| d1b3011292 | |||
| accad48e5b | |||
| 47146721b8 | |||
| 3830b3f74a | |||
| a081f292ca | |||
| cab7799f7b | |||
| efed8a2794 | |||
| f7f0ec2f54 | |||
| 15bc776fec | |||
| 4a4cd6cd02 | |||
| 52dd2b61bf | |||
| 4d10ffd506 | |||
| 2222740f50 | |||
| 829e889418 | |||
| 39e620c134 | |||
| 4a433e321f | |||
| 76d02feadb | |||
| aaa6296de2 | |||
| 3090e70857 | |||
| 04c560225b | |||
| 0ae58204c6 | |||
| 2da82bb4a7 | |||
| 852e7ebaa2 | |||
| d87e381f93 | |||
| 9efad4efed | |||
| 0d284bd574 | |||
| 3be028bc9d | |||
| bd1a43b699 | |||
| 5eecf3ff17 | |||
| 2875fa971c | |||
| 2280880cb7 | |||
| d0bfdd20f4 | |||
| d1d3ac9403 | |||
| 244dd0f150 | |||
| ae3cbbcaf6 | |||
| 7ef3f19c3c | |||
| bdb84e2bad | |||
| f76518e56a | |||
| 76924384af | |||
| ecd7de3dff | |||
| 6b5a8f83ce | |||
| b4b613b102 | |||
| 7032e02032 | |||
| 26dd041c6e | |||
| 7f99861218 | |||
| e65445b4d6 | |||
| 3ee958207a | |||
| 4341f4e224 | |||
| 1543cee7c8 | |||
| 491e951875 | |||
| 4bc723f87d | |||
| b1706f6908 | |||
| fca66abe2a | |||
| 8891193e83 | |||
| b9b70b0e66 | |||
| fe9152f67c | |||
| a9912d2fca | |||
| 67acb07e9e | |||
| 94f8e21c70 | |||
| 7b23a582b9 | |||
| 7c9e2f248c | |||
| dfd818420d | |||
| 11745b4e45 | |||
| 722bf7efcc | |||
| 9bafedc0fa | |||
| d994473b05 | |||
| ba9da49aa2 | |||
| f28c918c7e | |||
| 6ef42587ae | |||
| 30d8919ab1 | |||
| 4f1788b34d | |||
| a12c5cbcd8 | |||
| 1af4bee896 | |||
| d4bf9ee1ff | |||
| f41a11a16f | |||
| 1416b5d9d8 | |||
| a450789d9a | |||
| 29ff8716a2 | |||
| 5f94855dc3 | |||
| b58beebe72 | |||
| fd2bed7f9f | |||
| c1b9a11dd4 | |||
| 0ba94aceb6 | |||
| a413c725d4 | |||
| 17c742bbf5 | |||
| 8f1f59ce86 | |||
| 53357e8196 | |||
| 5ba2dbd9b1 | |||
| a3345c1f13 | |||
| 8286af6f54 | |||
| 8d2fca07e8 | |||
| 799cea64ac | |||
| 74330083b5 | |||
| 7319850902 | |||
| a95fd35426 | |||
| 704027f0ef | |||
| 6a062a3ed9 | |||
| 9a6c6ef97f | |||
| 9e56aff58a | |||
| c56ebbbea6 | |||
| 183af58b11 | |||
| cf1b8c34cc | |||
| e3cc4487fe | |||
| bcc069ddb8 | |||
| 9858ecd706 | |||
| 69038ce009 | |||
| 9cc65f8701 | |||
| da95f6ca4c | |||
| efd7c021ee | |||
| 9e33e19bf5 | |||
| 6eae3f7801 | |||
| be3d6c84cc | |||
| c83703cbdb | |||
| a03f7514db | |||
| 521da6518f | |||
| 93b54368f5 | |||
| 0526a075c5 | |||
| fc95386ea1 | |||
| 77382e918d | |||
| 7c5eaf9e5a | |||
| 3ac040bca1 | |||
| 3994c04585 | |||
| 147fa37fb1 | |||
| cec5f7abd1 | |||
| 3e4c9e5c64 | |||
| aac7b0d232 | |||
| d151a8c550 | |||
| b610c47f89 | |||
| 6c1a0b3931 | |||
| c95f84700c | |||
| f68796bd60 | |||
| f821bea0ad | |||
| 4f78bcb287 | |||
| 7586a1a367 | |||
| bf9a5882a7 | |||
| acc439ba17 | |||
| 9b14c1b6bf | |||
| 6a707cf586 | |||
| 97a51b0c7d | |||
| b9a0ede6ab | |||
| ae06bce888 | |||
| 25e10da427 | |||
| e842e181df | |||
| 28f3d431d4 | |||
| 5764efe544 | |||
| 720e9599c1 | |||
| 7d1c1c5b21 | |||
| 73ec12eafb | |||
| aef9aac312 | |||
| 74fb524e20 | |||
| ef0f85cd57 | |||
| 67d32f4649 | |||
| 9763f829a5 | |||
| 4430b91298 | |||
| eefae413d1 | |||
| d5af5a0c87 | |||
| ac3bccdc74 | |||
| 87282cb73c | |||
| afe2a466bb | |||
| 8ea6694d92 | |||
| 84c9bf7421 | |||
| 538e5248b0 | |||
| 13e736685a | |||
| 91182e3a70 | |||
| cc8aec6740 | |||
| e7e6d1818a | |||
| 9ffbed26c0 | |||
| e17826539b | |||
| 8639cfb4c2 | |||
| 6276b437a6 | |||
| 0911057744 | |||
| e135a6c931 | |||
| 24124709ca | |||
| 699e90437f | |||
| c54646b13d | |||
| cc3d0e1b01 | |||
| 3a9476d1b4 | |||
| 60d1f31bb0 | |||
| 5011efbec8 | |||
| 504ae9181c | |||
| 6cb7d6ec36 | |||
| d752337baa | |||
| b67ac44296 | |||
| d51e7c7e82 | |||
| 8b486c0310 | |||
| cdb7eeca46 | |||
| 876a9e084e | |||
| 373bfe70a0 | |||
| 55ab71ee5b | |||
| e342ac7e03 | |||
| 68cfffc4b4 | |||
| dd6fb1319b | |||
| afb66749a6 | |||
| 04c653a354 | |||
| 721764028e | |||
| 396a6a2ed0 | |||
| 08b4621899 | |||
| 829374e4fc | |||
| 17a7b49bda | |||
| afad0c18d9 | |||
| 761b3fad92 | |||
| a4beb37b81 | |||
| 105c3a48be | |||
| b75255cd9d | |||
| 293991d44b | |||
| d0c1ded5f3 | |||
| de6d19ea92 | |||
| ae3cbc9548 | |||
| 08fad080e3 | |||
| ab9fe45236 | |||
| 4aa630eeab | |||
| 86e435bbb1 | |||
| 73e2faa6c2 | |||
| fb2b45e562 | |||
| e8d448edcf | |||
| bbcd5eea3b | |||
| 3b91f96fc9 | |||
| ae1cffaf3c | |||
| 28247e7881 | |||
| 6dc884abc8 | |||
| 955780d3ab | |||
| d59d5a618b | |||
| 321ef388fe | |||
| 0bae286de9 | |||
| 39a72125e7 | |||
| 30163921ae | |||
| ac2f6674a3 | |||
| 667ccea722 | |||
| 0a6193252e | |||
| 98122794d4 | |||
| 134a8e21ae | |||
| de53e4bf1f | |||
| ca3b652bbd | |||
| 8f7078e822 | |||
| de4159a318 | |||
| 72b19ca680 | |||
| f244a97801 | |||
| df938fc1b4 | |||
| 2cdac665b0 | |||
| 61d3928bfb | |||
| 3c39c07f11 | |||
| a547d5bda5 | |||
| a1d4563f7a | |||
| afce73bd9d | |||
| 993a187c6f | |||
| bc00c29d11 | |||
| 9a5b84a007 | |||
| ad654e4484 | |||
| 03ae1f060b | |||
| 81d82e4f78 | |||
| 658e5d8f58 | |||
| 81c46679bd | |||
| 1c6309bf79 | |||
| 0ee71188ff | |||
| e53331c905 | |||
| 2e17db8a86 | |||
| d2357a0133 | |||
| 3d0c0ae437 | |||
| 9ef46659da | |||
| 5efd074af0 | |||
| dfc3deafa3 | |||
| 72eaaf6d55 | |||
| f3a1efd1cf | |||
| 624ae09f5c | |||
| ac3952b443 | |||
| c0fe912840 | |||
| f2e7d270ec | |||
| c3eb01013b | |||
| d896029e27 | |||
| 4973d2a04c | |||
| 1e3f17b5ab | |||
| 96783e53b4 | |||
| 149483b252 | |||
| 4cf38148dc | |||
| d21c97cc0f | |||
| 22d7161a52 | |||
| 74297d0a55 | |||
| 11f3ec7224 | |||
| d28448c5cd | |||
| 05d80d856c | |||
| 3de07473da | |||
| 8503cc7550 | |||
| d316037ad7 | |||
| fc4a993e1b | |||
| 8d6de0b9cf | |||
| b2c863a319 | |||
| f10cdba22e | |||
| 9d1ef009b8 | |||
| 8e777b3ba4 | |||
| 84c9cc6d15 | |||
| 37e016331f | |||
| a3f7458066 | |||
| f7ab8c4251 | |||
| b98269425e | |||
| c29a2f7c9c | |||
| 95754b47a6 | |||
| 532e60bedf | |||
| df56c843be | |||
| 4bb0764750 | |||
| 700e0cd65f | |||
| 6b217c52e6 | |||
| 904ac21020 | |||
| 8b8b23a8cd | |||
| 07b8f249cd | |||
| 0f78529f98 | |||
| 2062c28552 | |||
| 3a780cc57a | |||
| 3fad6ae3fd | |||
| 6c2be845dd | |||
| 7d65efec29 | |||
| 0a144b8c6b | |||
| 441811ecd7 | |||
| 5e012f8e3c | |||
| 610acc5ae9 | |||
| d4d23141c4 | |||
| 9ea1dbd2be | |||
| 21b0ad05a0 | |||
| 389702242d | |||
| 9accbe531e | |||
| d9efb36cf6 | |||
| c282e93a74 | |||
| e06657a798 | |||
| 69715f2ee0 | |||
| 291c17f608 | |||
| a239bdd28f | |||
| e9d9982e7c | |||
| 5ca479d252 | |||
| 449f2ae459 | |||
| f6490180eb | |||
| c389d35a7f | |||
| 9681f052a1 | |||
| e627e9b5ae | |||
| 4fb34de99e | |||
| 1f029b6ae7 | |||
| 443aaaa1a7 | |||
| e434627858 | |||
| 529037fda5 | |||
| 5e080c11bf | |||
| 860ea8a574 | |||
| a00b7e85ea | |||
| 0d0d77693f | |||
| 822ae69c1b | |||
| c19aa7acce | |||
| 5b62f8ea2b | |||
| 26ec7928d0 | |||
| 7f74433814 | |||
| 9643ecf8ca | |||
| 777b1bfe62 | |||
| b4997382da | |||
| 6e3b014471 | |||
| cf7b98b807 | |||
| 683cbc4c34 | |||
| 6ed6ed29b1 | |||
| d3d5fa3e85 | |||
| 92cfe8b074 | |||
| 2d92001076 | |||
| 4c7e8d0900 | |||
| 163ac3d3ee | |||
| 55ba31908a | |||
| f1e8c48c5e | |||
| 11b2e45ccc | |||
| f60eec4003 | |||
| f9909fbf85 | |||
| 9625924c60 | |||
| 8fadfd5035 | |||
| 25c451e5a0 | |||
| 938cb04789 | |||
| fda125638f | |||
| 36b063ed4f | |||
| 536e60d2c7 | |||
| af1a7c8ca3 | |||
| 7ecb039176 | |||
| 07d8d6e2f7 | |||
| c149d366bb | |||
| 8dcf494ef1 | |||
| 7b55bb4540 | |||
| d24e84d9ed | |||
| 03bc6ece1b | |||
| 2308f3d42c | |||
| 78a471ff71 | |||
| f711d683b5 | |||
| 6cc06d1739 | |||
| cbbeca3d17 | |||
| d3c0566679 | |||
| 61a51f5f23 | |||
| 9740a03f61 | |||
| 905e5773a3 | |||
| 68187c4642 | |||
| 6dda14dc47 | |||
| e0d7c831c7 | |||
| daf4436e07 | |||
| 9f0c72f93b | |||
| d066c3731b | |||
| 7ec1dc8817 | |||
| 67b3789133 | |||
| 93e14486d6 | |||
| f3d99e49d4 | |||
| a44985b41c | |||
| f270b960d6 | |||
| bac2d29a80 | |||
| d606d566ab | |||
| c4cad8e301 | |||
| 0946ed94fd | |||
| 316bf04d3d | |||
| 4eb918e656 | |||
| c08a1e26ab | |||
| e2a23b6ce9 | |||
| 2d6a92f22a | |||
| efa889d2e4 | |||
| 258963062b | |||
| 3e39fd09a9 | |||
| fb1c8db78a | |||
| 6156bffa2b | |||
| 71f772ebd0 | |||
| d44ac47bac | |||
| 2bdd9fa284 | |||
| cfaeb1539e | |||
| 3222fc645b | |||
| b8112eddec | |||
| 4ab6e9e2f8 | |||
| b77406bcb2 | |||
| 9617b1304e | |||
| a0f8674303 | |||
| 504db92e7d | |||
| 4b86e44693 | |||
| d68c46026b | |||
| c06d555647 | |||
| 2d02178e5c | |||
| 3bd0007e87 | |||
| 6e1c5786dc | |||
| 707b12a353 | |||
| 787620e2a2 | |||
| 3936411b9d | |||
| 94e17c456c | |||
| 19067711e7 | |||
| 3502c202f9 | |||
| 1076d587b5 | |||
| d447c460b1 | |||
| 06886d5a68 | |||
| 9080607b2c | |||
| 6f257bb3c2 | |||
| 2564f0c21d | |||
| 737bff6a36 | |||
| 06d488061f | |||
| 790ff2544a | |||
| 9ccea7acb1 | |||
| a639ea9e8a | |||
| ec6878f6ca | |||
| aa39967b28 | |||
| 305e8718b4 | |||
| 9f9ddcc2de | |||
| fb7cbe236b | |||
| f69eb24b5a | |||
| 7487743793 | |||
| 9aedce99b0 | |||
| 49b77b89ea | |||
| c6c9db3d0c | |||
| a6b7759880 | |||
| 2e3452af0f | |||
| 8827e1b217 | |||
| cb630ffab8 | |||
| 79c720c062 | |||
| 831590f6a9 | |||
| ab74ac11e4 | |||
| dec8578e70 | |||
| 1f6885bad0 | |||
| 4f1e5e4efd | |||
| 38e5b71abb | |||
| 4f90fc1db8 | |||
| c87ae86a8f | |||
| c796b6dea6 | |||
| c3a93d8d82 | |||
| 7f9b7b3f0e | |||
| 4c9e0f029e | |||
| 8214a9f66a | |||
| 6aede2d602 | |||
| f38a145418 | |||
| 9406c7bc82 | |||
| 225c36fbe5 | |||
| 6176e13612 | |||
| b047472650 | |||
| a83bb45fb8 | |||
| 243439a827 | |||
| 0b294c2334 | |||
| 2e35bac4e7 | |||
| 9d2788b46b | |||
| b0a2c3a2d6 | |||
| 98c9c5add9 | |||
| 0d4c45c585 | |||
| 9b1dcba94a | |||
| 347ba38cb4 | |||
| dcca71be61 | |||
| 4cef546ffc | |||
| ebfd7229d2 | |||
| 6c24443ff5 | |||
| e4132952a1 | |||
| d818dd3a41 | |||
| 50f5266b2c | |||
| 536a8ae6ad | |||
| d56d723fad | |||
| c766a2d70a | |||
| 1e6141c3d4 | |||
| ecf29db0e5 | |||
| ea118ae2e1 | |||
| f1e42bc50e | |||
| bec78ba154 | |||
| 568e578310 | |||
| 803475fb69 | |||
| 7629656926 | |||
| 6d023270f6 | |||
| 7a1c68a845 | |||
| 688c3e8e40 | |||
| 7829c890db | |||
| aeae97829f | |||
| fdffee8a60 | |||
| 802b98c72b | |||
| 5d2d51a0fb | |||
| 1f1cc09df6 | |||
| 5fd5990dce | |||
| 2447672269 | |||
| f9257843b5 | |||
| eedaba682f | |||
| d39f794eda | |||
| 0a77249178 | |||
| ab108a0e31 | |||
| 0bd6d9340e | |||
| 371337a95b | |||
| d4eb52d13d | |||
| 9ecb13d63a | |||
| 072ed01c38 | |||
| 1f7e40d04f | |||
| 8b2501b4b9 | |||
| 5cbf1fa8ca | |||
| 8db92dbe26 | |||
| 743995e0e6 | |||
| d3f4cef74d | |||
| 3b419cfc6f | |||
| 7ccd6fc47c | |||
| 18adc40d87 | |||
| 0b59ecdefd | |||
| 536f338441 | |||
| f58b211ed3 | |||
| c949188b9d | |||
| 82df83a96b | |||
| 22502ebb85 | |||
| 6f8064da6b | |||
| 674f750a57 | |||
| 74b3eb3dea | |||
| 3436842102 | |||
| e0b825a8d0 | |||
| c4a997cd85 | |||
| 2e5c6f5975 | |||
| cca51aa151 | |||
| b58d4f70f6 | |||
| 3a1aeea3c5 | |||
| 31565ff0fd | |||
| 2ebf4e6a7b | |||
| c1f009ad9a | |||
| 9151e649a5 | |||
| 3aaabaa214 | |||
| 7487829a23 | |||
| a5da6f1817 | |||
| 84f6bee5da | |||
| 12ce2941c7 | |||
| 15fd39ea0e | |||
| 5ed9bd1896 | |||
| c186e816bd | |||
| baa00f65ae | |||
| 2dd1b8f0c5 | |||
| 17d7aec895 | |||
| a40386669f | |||
| eb98da9880 | |||
| 506355ca75 | |||
| 123f65eea6 | |||
| cc03063366 | |||
| bbe2c8b126 | |||
| 5602a3ae1e | |||
| 0a03741590 | |||
| 65d36ee861 | |||
| 5041bc3511 | |||
| 44a40c1466 | |||
| bed2edb99f | |||
| c206fc8779 | |||
| b17a5e0074 | |||
| d2ed8134f1 | |||
| 7df0751cc6 | |||
| 71786b10c5 | |||
| fc5fdc109d | |||
| c9a0da1e12 | |||
| eccbdbcd4d | |||
| 32670805fc | |||
| ebee0a2794 | |||
| fa8ed9ca76 | |||
| 31ec424b3d | |||
| a929f81e92 | |||
| a23819ed6a | |||
| af556a09f6 | |||
| fb0bd7b7a8 | |||
| 14fe3e0410 | |||
| 06a82a49ae | |||
| f3ed26a3fb | |||
| 5864051109 | |||
| 63d13d768b | |||
| ee2a80ecc0 | |||
| 02b63702d9 | |||
| fac1f4b188 | |||
| dd523da577 | |||
| 713eab45d3 | |||
| fd99ce3329 | |||
| 8fcbbd3d53 | |||
| af150e4a1c | |||
| bf0e094142 | |||
| 71ca79448c | |||
| fd5eac5f71 | |||
| 90071fe42b | |||
| 072dfdaee4 | |||
| fd9a027aca | |||
| 3e07196f89 | |||
| d356b89f3c | |||
| d51ca32404 | |||
| 344e2664d4 | |||
| 07f6690206 | |||
| 2400eb4ca2 | |||
| 2add2007c1 | |||
| 563b42faf0 | |||
| 684165b882 | |||
| 5ac2f82267 | |||
| 94d7c3ba44 | |||
| c7edde1a69 | |||
| ed858f5354 | |||
| 5fda1fbd46 | |||
| 4d77f18cba |
@ -9,17 +9,30 @@ parameters:
|
|||||||
default: false
|
default: false
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
# Ensure running with CircleCI/huggingface
|
||||||
|
check_circleci_user:
|
||||||
|
docker:
|
||||||
|
- image: cimg/python:3.8.12
|
||||||
|
parallelism: 1
|
||||||
|
steps:
|
||||||
|
- run: echo $CIRCLE_PROJECT_USERNAME
|
||||||
|
- run: |
|
||||||
|
if [ "$CIRCLE_PROJECT_USERNAME" = "huggingface" ]; then
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
echo "The CI is running under $CIRCLE_PROJECT_USERNAME personal account. Please follow https://support.circleci.com/hc/en-us/articles/360008097173-Troubleshooting-why-pull-requests-are-not-triggering-jobs-on-my-organization- to fix it."; exit -1
|
||||||
|
fi
|
||||||
# Fetch the tests to run
|
# Fetch the tests to run
|
||||||
fetch_tests:
|
fetch_tests:
|
||||||
working_directory: ~/transformers
|
working_directory: ~/transformers
|
||||||
docker:
|
docker:
|
||||||
- image: cimg/python:3.7.12
|
- image: cimg/python:3.8.12
|
||||||
parallelism: 1
|
parallelism: 1
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run: pip install --upgrade pip
|
- run: pip install --upgrade --upgrade-strategy eager pip
|
||||||
- run: pip install GitPython
|
- run: pip install -U --upgrade-strategy eager GitPython
|
||||||
- run: pip install .
|
- run: pip install -U --upgrade-strategy eager .
|
||||||
- run: mkdir -p test_preparation
|
- run: mkdir -p test_preparation
|
||||||
- run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
|
- run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
@ -30,22 +43,41 @@ jobs:
|
|||||||
else
|
else
|
||||||
touch test_preparation/test_list.txt
|
touch test_preparation/test_list.txt
|
||||||
fi
|
fi
|
||||||
- run: python utils/tests_fetcher.py --filter_pipeline_tests
|
- run: |
|
||||||
|
if [ -f examples_test_list.txt ]; then
|
||||||
|
mv examples_test_list.txt test_preparation/examples_test_list.txt
|
||||||
|
else
|
||||||
|
touch test_preparation/examples_test_list.txt
|
||||||
|
fi
|
||||||
|
- run: |
|
||||||
|
if [ -f filtered_test_list_cross_tests.txt ]; then
|
||||||
|
mv filtered_test_list_cross_tests.txt test_preparation/filtered_test_list_cross_tests.txt
|
||||||
|
else
|
||||||
|
touch test_preparation/filtered_test_list_cross_tests.txt
|
||||||
|
fi
|
||||||
|
- run: |
|
||||||
|
if [ -f doctest_list.txt ]; then
|
||||||
|
cp doctest_list.txt test_preparation/doctest_list.txt
|
||||||
|
else
|
||||||
|
touch test_preparation/doctest_list.txt
|
||||||
|
fi
|
||||||
|
- run: |
|
||||||
|
if [ -f test_repo_utils.txt ]; then
|
||||||
|
mv test_repo_utils.txt test_preparation/test_repo_utils.txt
|
||||||
|
else
|
||||||
|
touch test_preparation/test_repo_utils.txt
|
||||||
|
fi
|
||||||
|
- run: python utils/tests_fetcher.py --filter_tests
|
||||||
- run: |
|
- run: |
|
||||||
if [ -f test_list.txt ]; then
|
if [ -f test_list.txt ]; then
|
||||||
mv test_list.txt test_preparation/filtered_test_list.txt
|
mv test_list.txt test_preparation/filtered_test_list.txt
|
||||||
else
|
else
|
||||||
touch test_preparation/filtered_test_list.txt
|
touch test_preparation/filtered_test_list.txt
|
||||||
fi
|
fi
|
||||||
- run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
|
|
||||||
- run: |
|
|
||||||
if [ -f test_list.txt ]; then
|
|
||||||
mv test_list.txt test_preparation/examples_test_list.txt
|
|
||||||
else
|
|
||||||
touch test_preparation/examples_test_list.txt
|
|
||||||
fi
|
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: test_preparation/test_list.txt
|
path: test_preparation/test_list.txt
|
||||||
|
- store_artifacts:
|
||||||
|
path: test_preparation/doctest_list.txt
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: ~/transformers/test_preparation/filtered_test_list.txt
|
path: ~/transformers/test_preparation/filtered_test_list.txt
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
@ -59,6 +91,8 @@ jobs:
|
|||||||
- run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
|
- run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: test_preparation/generated_config.txt
|
path: test_preparation/generated_config.txt
|
||||||
|
- store_artifacts:
|
||||||
|
path: test_preparation/filtered_test_list_cross_tests.txt
|
||||||
- continuation/continue:
|
- continuation/continue:
|
||||||
configuration_path: test_preparation/generated_config.yml
|
configuration_path: test_preparation/generated_config.yml
|
||||||
|
|
||||||
@ -66,17 +100,22 @@ jobs:
|
|||||||
fetch_all_tests:
|
fetch_all_tests:
|
||||||
working_directory: ~/transformers
|
working_directory: ~/transformers
|
||||||
docker:
|
docker:
|
||||||
- image: cimg/python:3.7.12
|
- image: cimg/python:3.8.12
|
||||||
parallelism: 1
|
parallelism: 1
|
||||||
steps:
|
steps:
|
||||||
- run: pip install --upgrade pip
|
- checkout
|
||||||
- run: pip install .
|
- run: pip install --upgrade --upgrade-strategy eager pip
|
||||||
|
- run: pip install -U --upgrade-strategy eager GitPython
|
||||||
|
- run: pip install -U --upgrade-strategy eager .
|
||||||
- run: |
|
- run: |
|
||||||
mkdir test_preparation
|
mkdir test_preparation
|
||||||
echo "tests" > test_preparation/test_list.txt
|
echo -n "tests" > test_preparation/test_list.txt
|
||||||
echo "tests" > test_preparation/examples_test_list.txt
|
echo -n "all" > test_preparation/examples_test_list.txt
|
||||||
- run: python utils/tests_fetcher.py --filter_pipeline_tests
|
echo -n "tests/repo_utils" > test_preparation/test_repo_utils.txt
|
||||||
- run: mv test_list.txt test_preparation/filtered_test_list.txt
|
- run: |
|
||||||
|
echo -n "tests" > test_list.txt
|
||||||
|
python utils/tests_fetcher.py --filter_tests
|
||||||
|
mv test_list.txt test_preparation/filtered_test_list.txt
|
||||||
- run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation
|
- run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation
|
||||||
- run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
|
- run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
@ -87,7 +126,7 @@ jobs:
|
|||||||
check_code_quality:
|
check_code_quality:
|
||||||
working_directory: ~/transformers
|
working_directory: ~/transformers
|
||||||
docker:
|
docker:
|
||||||
- image: cimg/python:3.7.12
|
- image: cimg/python:3.8.12
|
||||||
resource_class: large
|
resource_class: large
|
||||||
environment:
|
environment:
|
||||||
TRANSFORMERS_IS_CI: yes
|
TRANSFORMERS_IS_CI: yes
|
||||||
@ -97,26 +136,38 @@ jobs:
|
|||||||
- checkout
|
- checkout
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
keys:
|
keys:
|
||||||
- v0.5-code_quality-{{ checksum "setup.py" }}
|
- v0.7-code_quality-{{ checksum "setup.py" }}
|
||||||
- v0.5-code-quality
|
- v0.7-code-quality
|
||||||
- run: pip install --upgrade pip
|
- restore_cache:
|
||||||
- run: pip install .[all,quality]
|
keys:
|
||||||
|
- v0.7-code_quality-{{ checksum "setup.py" }}-site-packages
|
||||||
|
- v0.7-code-quality-site-packages
|
||||||
|
- run: pip install --upgrade --upgrade-strategy eager pip
|
||||||
|
- run: pip install -U --upgrade-strategy eager .[all,quality]
|
||||||
- save_cache:
|
- save_cache:
|
||||||
-            key: v0.5-code_quality-{{ checksum "setup.py" }}
+            key: v0.7-code_quality-{{ checksum "setup.py" }}
             paths:
                 - '~/.cache/pip'
-        - run: black --check --preview examples tests src utils
-        - run: isort --check-only examples tests src utils
+        - save_cache:
+            key: v0.7-code_quality-{{ checksum "setup.py" }}-site-packages
+            paths:
+                - '~/.pyenv/versions/'
+        - run:
+            name: Show installed libraries and their versions
+            command: pip freeze | tee installed.txt
+        - store_artifacts:
+            path: ~/transformers/installed.txt
+        - run: black --check examples tests src utils
+        - run: ruff examples tests src utils
         - run: python utils/custom_init_isort.py --check_only
         - run: python utils/sort_auto_mappings.py --check_only
-        - run: flake8 examples tests src utils
         - run: doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
         - run: python utils/check_doc_toc.py

     check_repository_consistency:
         working_directory: ~/transformers
         docker:
-            - image: cimg/python:3.7.12
+            - image: cimg/python:3.8.12
         resource_class: large
         environment:
             TRANSFORMERS_IS_CI: yes
@@ -126,23 +177,38 @@ jobs:
         - checkout
         - restore_cache:
             keys:
-                - v0.5-repository_consistency-{{ checksum "setup.py" }}
-                - v0.5-repository_consistency
-        - run: pip install --upgrade pip
-        - run: pip install .[all,quality]
+                - v0.7-repository_consistency-{{ checksum "setup.py" }}
+                - v0.7-repository_consistency
+        - restore_cache:
+            keys:
+                - v0.7-repository_consistency-{{ checksum "setup.py" }}-site-packages
+                - v0.7-repository_consistency-site-packages
+        - run: pip install --upgrade --upgrade-strategy eager pip
+        - run: pip install -U --upgrade-strategy eager .[all,quality]
         - save_cache:
-            key: v0.5-repository_consistency-{{ checksum "setup.py" }}
+            key: v0.7-repository_consistency-{{ checksum "setup.py" }}
             paths:
                 - '~/.cache/pip'
+        - save_cache:
+            key: v0.7-repository_consistency-{{ checksum "setup.py" }}-site-packages
+            paths:
+                - '~/.pyenv/versions/'
+        - run:
+            name: Show installed libraries and their versions
+            command: pip freeze | tee installed.txt
+        - store_artifacts:
+            path: ~/transformers/installed.txt
         - run: python utils/check_copies.py
         - run: python utils/check_table.py
         - run: python utils/check_dummies.py
         - run: python utils/check_repo.py
         - run: python utils/check_inits.py
         - run: python utils/check_config_docstrings.py
+        - run: python utils/check_config_attributes.py
+        - run: python utils/check_doctest_list.py
         - run: make deps_table_check_updated
-        - run: python utils/tests_fetcher.py --sanity_check
         - run: python utils/update_metadata.py --check-only
+        - run: python utils/check_task_guides.py

 workflows:
     version: 2
@@ -150,6 +216,7 @@ workflows:
     when:
         not: <<pipeline.parameters.nightly>>
     jobs:
+        - check_circleci_user
         - check_code_quality
         - check_repository_consistency
         - fetch_tests
@@ -157,6 +224,7 @@ workflows:
     nightly:
         when: <<pipeline.parameters.nightly>>
         jobs:
+            - check_circleci_user
             - check_code_quality
             - check_repository_consistency
             - fetch_all_tests
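The quality and consistency jobs above share a two-layer caching pattern: one cache for pip's download cache keyed on setup.py, and a second for the installed site-packages under ~/.pyenv/versions/, so an unchanged setup.py lets a run skip most of the install work. A minimal Python sketch of the step layout (the helper name and key strings are illustrative, not the repo's exact values):

    # Sketch: two-layer restore/save cache steps, mirroring the YAML above.
    def cache_steps(cache_name, version="0.7"):
        key = f"v{version}-{cache_name}-" + '{{ checksum "setup.py" }}'
        restore = [
            {"restore_cache": {"keys": [key, f"v{version}-{cache_name}"]}},
            {"restore_cache": {"keys": [key + "-site-packages", f"v{version}-{cache_name}-site-packages"]}},
        ]
        # ...pip install steps run between restore and save...
        save = [
            {"save_cache": {"key": key, "paths": ["~/.cache/pip"]}},
            {"save_cache": {"key": key + "-site-packages", "paths": ["~/.pyenv/versions/"]}},
        ]
        return restore + save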
.circleci/create_circleci_config.py

@@ -15,17 +15,36 @@

 import argparse
 import copy
+import glob
 import os
+import random
 from dataclasses import dataclass
 from typing import Any, Dict, List, Optional

 import yaml


-COMMON_ENV_VARIABLES = {"OMP_NUM_THREADS": 1, "TRANSFORMERS_IS_CI": True, "PYTEST_TIMEOUT": 120}
+COMMON_ENV_VARIABLES = {
+    "OMP_NUM_THREADS": 1,
+    "TRANSFORMERS_IS_CI": True,
+    "PYTEST_TIMEOUT": 120,
+    "RUN_PIPELINE_TESTS": False,
+    "RUN_PT_TF_CROSS_TESTS": False,
+    "RUN_PT_FLAX_CROSS_TESTS": False,
+}
 COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "s": None}
-DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.7.12"}]
-TORCH_SCATTER_INSTALL = "pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html"
+DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}]
+
+
+class EmptyJob:
+    job_name = "empty"
+
+    def to_dict(self):
+        return {
+            "working_directory": "~/transformers",
+            "docker": copy.deepcopy(DEFAULT_DOCKER_IMAGE),
+            "steps":["checkout"],
+        }

 @dataclass
@@ -33,7 +52,7 @@ class CircleCIJob:
     name: str
     additional_env: Dict[str, Any] = None
     cache_name: str = None
-    cache_version: str = "0.5"
+    cache_version: str = "0.7"
     docker_image: List[Dict[str, str]] = None
     install_steps: List[str] = None
     marker: Optional[str] = None
@@ -43,6 +62,8 @@ class CircleCIJob:
     resource_class: Optional[str] = "xlarge"
     tests_to_run: Optional[List[str]] = None
     working_directory: str = "~/transformers"
+    # This should be only used for doctest job!
+    command_timeout: Optional[int] = None

     def __post_init__(self):
         # Deal with defaults for mutable attributes.
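The `additional_env` field above is merged over the COMMON_ENV_VARIABLES defaults introduced earlier in this diff, so each job can switch on exactly one gate (pipeline tests, PT/TF or PT/Flax cross tests) while the others stay off. A sketch of that merge, with the constant repeated here only to keep the snippet self-contained:

    # Sketch: per-job env is defaults + overrides (mirrors COMMON_ENV_VARIABLES above).
    COMMON_ENV_VARIABLES = {
        "OMP_NUM_THREADS": 1,
        "TRANSFORMERS_IS_CI": True,
        "PYTEST_TIMEOUT": 120,
        "RUN_PIPELINE_TESTS": False,
        "RUN_PT_TF_CROSS_TESTS": False,
        "RUN_PT_FLAX_CROSS_TESTS": False,
    }
    env = COMMON_ENV_VARIABLES.copy()
    env.update({"RUN_PIPELINE_TESTS": True})  # e.g. what pipelines_torch_job passes
    assert env["RUN_PT_TF_CROSS_TESTS"] is False  # the other gates remain off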
@@ -59,12 +80,16 @@ class CircleCIJob:
             self.pytest_options = {}
         if isinstance(self.tests_to_run, str):
             self.tests_to_run = [self.tests_to_run]
+        if self.parallelism is None:
+            self.parallelism = 1

     def to_dict(self):
+        env = COMMON_ENV_VARIABLES.copy()
+        env.update(self.additional_env)
         job = {
             "working_directory": self.working_directory,
             "docker": self.docker_image,
-            "environment": {**COMMON_ENV_VARIABLES, **self.additional_env},
+            "environment": env,
         }
         if self.resource_class is not None:
             job["resource_class"] = self.resource_class
@@ -81,6 +106,14 @@ class CircleCIJob:
                     ]
                 }
             },
+            {
+                "restore_cache": {
+                    "keys": [
+                        f"v{self.cache_version}-{self.cache_name}-" + '{{ checksum "setup.py" }}-site-packages',
+                        f"v{self.cache_version}-{self.cache_name}-site-packages",
+                    ]
+                }
+            },
         ]
         steps.extend([{"run": l} for l in self.install_steps])
         steps.append(
@@ -91,21 +124,107 @@ class CircleCIJob:
                 }
             }
         )
+        steps.append(
+            {
+                "save_cache": {
+                    "key": f"v{self.cache_version}-{self.cache_name}-" + '{{ checksum "setup.py" }}-site-packages',
+                    "paths": ["~/.pyenv/versions/"],
+                }
+            }
+        )
+        steps.append({"run": {"name": "Show installed libraries and their versions", "command": "pip freeze | tee installed.txt"}})
+        steps.append({"store_artifacts": {"path": "~/transformers/installed.txt"}})

         all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options}
-        pytest_flags = [f"--{key}={value}" if value is not None else f"-{key}" for key, value in all_options.items()]
+        pytest_flags = [f"--{key}={value}" if (value is not None or key in ["doctest-modules"]) else f"-{key}" for key, value in all_options.items()]
         pytest_flags.append(
             f"--make-reports={self.name}" if "examples" in self.name else f"--make-reports=tests_{self.name}"
         )
-        test_command = f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags)
-        if self.tests_to_run is None:
-            test_command += " << pipeline.parameters.tests_to_run >>"
+        test_command = ""
+        if self.command_timeout:
+            test_command = f"timeout {self.command_timeout} "
+        test_command += f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags)
+
+        if self.parallelism == 1:
+            if self.tests_to_run is None:
+                test_command += " << pipeline.parameters.tests_to_run >>"
+            else:
+                test_command += " " + " ".join(self.tests_to_run)
         else:
-            test_command += " " + " ".join(self.tests_to_run)
+            # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
+            tests = self.tests_to_run
+            if tests is None:
+                folder = os.environ["test_preparation_dir"]
+                test_file = os.path.join(folder, "filtered_test_list.txt")
+                if os.path.exists(test_file):
+                    with open(test_file) as f:
+                        tests = f.read().split(" ")
+
+            # expand the test list
+            if tests == ["tests"]:
+                tests = [os.path.join("tests", x) for x in os.listdir("tests")]
+            expanded_tests = []
+            for test in tests:
+                if test.endswith(".py"):
+                    expanded_tests.append(test)
+                elif test == "tests/models":
+                    expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)])
+                elif test == "tests/pipelines":
+                    expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)])
+                else:
+                    expanded_tests.append(test)
+            # Avoid long tests always being collected together
+            random.shuffle(expanded_tests)
+            tests = " ".join(expanded_tests)
+
+            # Each executor to run ~10 tests
+            n_executors = max(len(tests) // 10, 1)
+            # Avoid empty test list on some executor(s) or launching too many executors
+            if n_executors > self.parallelism:
+                n_executors = self.parallelism
+            job["parallelism"] = n_executors
+
+            # Need to be newline separated for the command `circleci tests split` below
+            command = f'echo {tests} | tr " " "\\n" >> tests.txt'
+            steps.append({"run": {"name": "Get tests", "command": command}})
+
+            command = 'TESTS=$(circleci tests split tests.txt) && echo $TESTS > splitted_tests.txt'
+            steps.append({"run": {"name": "Split tests", "command": command}})
+
+            steps.append({"store_artifacts": {"path": "~/transformers/tests.txt"}})
+            steps.append({"store_artifacts": {"path": "~/transformers/splitted_tests.txt"}})
+
+            test_command = ""
+            if self.timeout:
+                test_command = f"timeout {self.timeout} "
+            test_command += f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags)
+            test_command += " $(cat splitted_tests.txt)"
         if self.marker is not None:
             test_command += f" -m {self.marker}"
-        test_command += " | tee tests_output.txt"
+
+        if self.name == "pr_documentation_tests":
+            # can't use ` | tee tee tests_output.txt` as usual
+            test_command += " > tests_output.txt"
+            # Save the return code, so we can check if it is timeout in the next step.
+            test_command += '; touch "$?".txt'
+            # Never fail the test step for the doctest job. We will check the results in the next step, and fail that
+            # step instead if the actual test failures are found. This is to avoid the timeout being reported as test
+            # failure.
+            test_command = f"({test_command}) || true"
+        else:
+            test_command += " | tee tests_output.txt"
         steps.append({"run": {"name": "Run tests", "command": test_command}})
+
+        # return code `124` means the previous (pytest run) step is timeout
+        if self.name == "pr_documentation_tests":
+            checkout_doctest_command = 'if [ -s reports/tests_pr_documentation_tests/failures_short.txt ]; '
+            checkout_doctest_command += 'then echo "some test failed"; '
+            checkout_doctest_command += 'cat reports/tests_pr_documentation_tests/failures_short.txt; '
+            checkout_doctest_command += 'cat reports/tests_pr_documentation_tests/summary_short.txt; exit -1; '
+            checkout_doctest_command += 'elif [ -s reports/tests_pr_documentation_tests/stats.txt ]; then echo "All tests pass!"; '
+            checkout_doctest_command += 'elif [ -f 124.txt ]; then echo "doctest timeout!"; else echo "other fatal error)"; exit -1; fi;'
+            steps.append({"run": {"name": "Check doctest results", "command": checkout_doctest_command}})
+
         steps.append({"store_artifacts": {"path": "~/transformers/tests_output.txt"}})
         steps.append({"store_artifacts": {"path": "~/transformers/reports"}})
         job["steps"] = steps
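When `parallelism` is greater than 1, the branch above expands the test list, shuffles it so long-running suites do not cluster on one executor, and hands the newline-separated list to `circleci tests split`. A hedged sketch of the executor-count heuristic (note that the code above takes `len()` of the space-joined string, so it effectively counts characters; this sketch counts entries, on the assumption that roughly ten tests per executor is the intent):

    import random

    # Sketch: pick how many CircleCI executors to fan tests out across.
    def pick_parallelism(expanded_tests, max_parallelism):
        random.shuffle(expanded_tests)  # avoid long tests always landing together
        n_executors = max(len(expanded_tests) // 10, 1)  # ~10 tests per executor
        return min(n_executors, max_parallelism)  # never exceed the job's cap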
@@ -121,14 +240,12 @@ torch_and_tf_job = CircleCIJob(
     "torch_and_tf",
     additional_env={"RUN_PT_TF_CROSS_TESTS": True},
     install_steps=[
-        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs",
+        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs cmake",
         "git lfs install",
-        "pip install --upgrade pip",
-        "pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]",
-        TORCH_SCATTER_INSTALL,
-        "pip install tensorflow_probability",
-        "pip install https://github.com/kpu/kenlm/archive/master.zip",
-        "pip install git+https://github.com/huggingface/accelerate",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]",
+        "pip install -U --upgrade-strategy eager tensorflow_probability",
+        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
     ],
     marker="is_pt_tf_cross_test",
     pytest_options={"rA": None, "durations": 0},
@@ -140,11 +257,9 @@ torch_and_flax_job = CircleCIJob(
     additional_env={"RUN_PT_FLAX_CROSS_TESTS": True},
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
-        "pip install --upgrade pip",
-        "pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]",
-        TORCH_SCATTER_INSTALL,
-        "pip install https://github.com/kpu/kenlm/archive/master.zip",
-        "pip install git+https://github.com/huggingface/accelerate",
+        "pip install -U --upgrade-strategy eager --upgrade pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]",
+        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
     ],
     marker="is_pt_flax_cross_test",
     pytest_options={"rA": None, "durations": 0},
@@ -155,12 +270,11 @@ torch_job = CircleCIJob(
     "torch",
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time",
-        "pip install --upgrade pip",
-        "pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
-        TORCH_SCATTER_INSTALL,
-        "pip install https://github.com/kpu/kenlm/archive/master.zip",
-        "pip install git+https://github.com/huggingface/accelerate",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
+        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
     ],
+    parallelism=1,
     pytest_num_workers=3,
 )

@@ -168,12 +282,13 @@ torch_job = CircleCIJob(
 tf_job = CircleCIJob(
     "tf",
     install_steps=[
-        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
-        "pip install --upgrade pip",
-        "pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]",
-        "pip install tensorflow_probability",
-        "pip install https://github.com/kpu/kenlm/archive/master.zip",
+        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng cmake",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]",
+        "pip install -U --upgrade-strategy eager tensorflow_probability",
     ],
+    parallelism=1,
+    pytest_num_workers=6,
     pytest_options={"rA": None},
 )

@@ -182,37 +297,38 @@ flax_job = CircleCIJob(
     "flax",
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
-        "pip install --upgrade pip",
-        "pip install .[flax,testing,sentencepiece,flax-speech,vision]",
-        "pip install https://github.com/kpu/kenlm/archive/master.zip",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[flax,testing,sentencepiece,flax-speech,vision]",
     ],
+    parallelism=1,
     pytest_options={"rA": None},
 )


 pipelines_torch_job = CircleCIJob(
     "pipelines_torch",
+    additional_env={"RUN_PIPELINE_TESTS": True},
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
-        "pip install --upgrade pip",
-        "pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
-        TORCH_SCATTER_INSTALL,
-        "pip install https://github.com/kpu/kenlm/archive/master.zip",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm,video]",
     ],
     pytest_options={"rA": None},
-    tests_to_run="tests/pipelines/"
+    marker="is_pipeline_test",
 )


 pipelines_tf_job = CircleCIJob(
     "pipelines_tf",
+    additional_env={"RUN_PIPELINE_TESTS": True},
     install_steps=[
-        "pip install --upgrade pip",
-        "pip install .[sklearn,tf-cpu,testing,sentencepiece]",
-        "pip install tensorflow_probability",
+        "sudo apt-get -y update && sudo apt-get install -y cmake",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,testing,sentencepiece,vision]",
+        "pip install -U --upgrade-strategy eager tensorflow_probability",
     ],
     pytest_options={"rA": None},
-    tests_to_run="tests/pipelines/"
+    marker="is_pipeline_test",
 )

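Each job definition above feeds CircleCIJob.to_dict(), where `pytest_options` entries with a value become `--key=value` flags and entries mapped to None become short `-key` flags, with the `marker` appended as `-m`. A small illustrative reconstruction (the worker count and marker are taken from the jobs above; the command itself is a sketch, not the exact generated string):

    # Sketch: how pytest_options plus a marker become a pytest command line.
    options = {"max-worker-restart": 0, "dist": "loadfile", "s": None, "rA": None}
    flags = [f"--{k}={v}" if v is not None else f"-{k}" for k, v in options.items()]
    cmd = "python -m pytest -n 3 " + " ".join(flags) + " -m is_pipeline_test"
    print(cmd)
    # python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s -rA -m is_pipeline_test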
@@ -231,8 +347,8 @@ custom_tokenizers_job = CircleCIJob(
             "sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local\n"
             "sudo make install\n",
         },
-        "pip install --upgrade pip",
-        "pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]",
         "python -m unidic download",
     ],
     parallelism=None,
@@ -250,11 +366,10 @@ examples_torch_job = CircleCIJob(
     cache_name="torch_examples",
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
-        "pip install --upgrade pip",
-        "pip install .[sklearn,torch,sentencepiece,testing,torch-speech]",
-        "pip install -r examples/pytorch/_tests_requirements.txt",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,torch,sentencepiece,testing,torch-speech]",
+        "pip install -U --upgrade-strategy eager -r examples/pytorch/_tests_requirements.txt",
     ],
-    tests_to_run="./examples/pytorch/",
 )


@@ -262,11 +377,11 @@ examples_tensorflow_job = CircleCIJob(
     "examples_tensorflow",
     cache_name="tensorflow_examples",
     install_steps=[
-        "pip install --upgrade pip",
-        "pip install .[sklearn,tensorflow,sentencepiece,testing]",
-        "pip install -r examples/tensorflow/_tests_requirements.txt",
+        "sudo apt-get -y update && sudo apt-get install -y cmake",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[sklearn,tensorflow,sentencepiece,testing]",
+        "pip install -U --upgrade-strategy eager -r examples/tensorflow/_tests_requirements.txt",
     ],
-    tests_to_run="./examples/tensorflow/",
 )


@@ -274,22 +389,22 @@ examples_flax_job = CircleCIJob(
     "examples_flax",
     cache_name="flax_examples",
     install_steps=[
-        "pip install --upgrade pip",
-        "pip install .[flax,testing,sentencepiece]",
-        "pip install -r examples/flax/_tests_requirements.txt",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[flax,testing,sentencepiece]",
+        "pip install -U --upgrade-strategy eager -r examples/flax/_tests_requirements.txt",
     ],
-    tests_to_run="./examples/flax/",
 )


 hub_job = CircleCIJob(
     "hub",
+    additional_env={"HUGGINGFACE_CO_STAGING": True},
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install git-lfs",
         'git config --global user.email "ci@dummy.com"',
         'git config --global user.name "ci"',
-        "pip install --upgrade pip",
-        "pip install .[torch,sentencepiece,testing]",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[torch,sentencepiece,testing,vision]",
     ],
     marker="is_staging_test",
     pytest_num_workers=1,
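The hard-coded `tests_to_run` values were dropped from the example jobs above because `create_circleci_config` (later in this diff) now assigns tests per framework from examples_test_list.txt. A minimal sketch of that selection, with hypothetical file paths:

    # Sketch: map a job name to its framework folder and filter the test list.
    example_tests = ["examples/pytorch/test_a.py", "examples/flax/test_b.py"]  # hypothetical
    framework = "examples_torch".replace("examples_", "").replace("torch", "pytorch")
    selected = [f for f in example_tests if f.startswith(f"examples/{framework}")]
    assert selected == ["examples/pytorch/test_a.py"]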
@@ -299,31 +414,94 @@ hub_job = CircleCIJob(
 onnx_job = CircleCIJob(
     "onnx",
     install_steps=[
-        "pip install --upgrade pip",
-        "pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]",
+        "sudo apt-get -y update && sudo apt-get install -y cmake",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]",
     ],
     pytest_options={"k onnx": None},
     pytest_num_workers=1,
 )


-layoutlm_job = CircleCIJob(
-    "layoutlmv2_and_v3",
+exotic_models_job = CircleCIJob(
+    "exotic_models",
     install_steps=[
         "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev",
-        "pip install --upgrade pip",
-        "pip install .[torch,testing,vision]",
-        "pip install torchvision",
-        "pip install 'git+https://github.com/facebookresearch/detectron2.git'",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[torch,testing,vision]",
+        "pip install -U --upgrade-strategy eager torchvision",
+        "pip install -U --upgrade-strategy eager scipy",
+        "pip install -U --upgrade-strategy eager 'git+https://github.com/facebookresearch/detectron2.git'",
         "sudo apt install tesseract-ocr",
-        "pip install pytesseract",
+        "pip install -U --upgrade-strategy eager pytesseract",
+        "pip install -U --upgrade-strategy eager natten",
+        # TODO (ydshieh): Remove this line once `https://github.com/facebookresearch/detectron2/issues/5010` is resolved
+        'pip install -U --upgrade-strategy eager "Pillow<10.0.0"',
+    ],
+    tests_to_run=[
+        "tests/models/*layoutlmv*",
+        "tests/models/*nat",
+        "tests/models/deta",
     ],
-    tests_to_run="tests/models/*layoutlmv*",
     pytest_num_workers=1,
     pytest_options={"durations": 100},
 )
+
+
+repo_utils_job = CircleCIJob(
+    "repo_utils",
+    install_steps=[
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager .[quality,testing,torch]",
+    ],
+    parallelism=None,
+    pytest_num_workers=1,
+    resource_class="large",
+    tests_to_run="tests/repo_utils",
+)
+
+
+# We also include a `dummy.py` file in the files to be doc-tested to prevent edge case failure. Otherwise, the pytest
+# hangs forever during test collection while showing `collecting 0 items / 21 errors`. (To see this, we have to remove
+# the bash output redirection.)
+py_command = 'from utils.tests_fetcher import get_doctest_files; to_test = get_doctest_files() + ["dummy.py"]; to_test = " ".join(to_test); print(to_test)'
+py_command = f"$(python3 -c '{py_command}')"
+command = f'echo "{py_command}" > pr_documentation_tests_temp.txt'
+doc_test_job = CircleCIJob(
+    "pr_documentation_tests",
+    additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"},
+    install_steps=[
+        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time ffmpeg",
+        "pip install --upgrade --upgrade-strategy eager pip",
+        "pip install -U --upgrade-strategy eager -e .[dev]",
+        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
+        "pip install --upgrade --upgrade-strategy eager pytest pytest-sugar",
+        "pip install -U --upgrade-strategy eager natten",
+        "find -name __pycache__ -delete",
+        "find . -name \*.pyc -delete",
+        # Add an empty file to keep the test step running correctly even no file is selected to be tested.
+        "touch dummy.py",
+        {
+            "name": "Get files to test",
+            "command": command,
+        },
+        {
+            "name": "Show information in `Get files to test`",
+            "command":
+                "cat pr_documentation_tests_temp.txt"
+        },
+        {
+            "name": "Get the last line in `pr_documentation_tests.txt`",
+            "command":
+                "tail -n1 pr_documentation_tests_temp.txt | tee pr_documentation_tests.txt"
+        },
+    ],
+    tests_to_run="$(cat pr_documentation_tests.txt)",  # noqa
+    pytest_options={"-doctest-modules": None, "doctest-glob": "*.md", "dist": "loadfile", "rvsA": None},
+    command_timeout=1200,  # test cannot run longer than 1200 seconds
+    pytest_num_workers=1,
+)

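The `command_timeout=1200` on doc_test_job works together with the command rewriting in `to_dict()` shown earlier: the pytest run is wrapped in `timeout`, its exit code is recorded as a file name (GNU timeout exits with 124, producing 124.txt), and the step itself never fails so the follow-up "Check doctest results" step can tell a timeout apart from real failures. A sketch of the resulting command string (string construction only; the test path is a placeholder):

    # Sketch: build the guarded doctest command used for pr_documentation_tests.
    test_command = "timeout 1200 python -m pytest --doctest-modules dummy.py"
    test_command += " > tests_output.txt"   # a pipe to tee would replace the exit code here
    test_command += '; touch "$?".txt'      # a timeout leaves a "124.txt" marker file
    test_command = f"({test_command}) || true"  # never fail this step directly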
 REGULAR_TESTS = [
     torch_and_tf_job,
     torch_and_flax_job,
@@ -333,7 +511,7 @@ REGULAR_TESTS = [
     custom_tokenizers_job,
     hub_job,
     onnx_job,
-    layoutlm_job,
+    exotic_models_job,
 ]
 EXAMPLES_TESTS = [
     examples_torch_job,
@@ -344,11 +522,15 @@ PIPELINE_TESTS = [
     pipelines_torch_job,
     pipelines_tf_job,
 ]
+REPO_UTIL_TESTS = [repo_utils_job]
+DOC_TESTS = [doc_test_job]


 def create_circleci_config(folder=None):
     if folder is None:
         folder = os.getcwd()
+    # Used in CircleCIJob.to_dict() to expand the test list (for using parallelism)
+    os.environ["test_preparation_dir"] = folder
     jobs = []
     all_test_file = os.path.join(folder, "test_list.txt")
     if os.path.exists(all_test_file):
@@ -368,17 +550,73 @@ def create_circleci_config(folder=None):
     if len(test_list) > 0:
         jobs.extend(REGULAR_TESTS)
+
+        extended_tests_to_run = set(test_list.split())
+        # Extend the test files for cross test jobs
+        for job in jobs:
+            if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]:
+                for test_path in copy.copy(extended_tests_to_run):
+                    dir_path, fn = os.path.split(test_path)
+                    if fn.startswith("test_modeling_tf_"):
+                        fn = fn.replace("test_modeling_tf_", "test_modeling_")
+                    elif fn.startswith("test_modeling_flax_"):
+                        fn = fn.replace("test_modeling_flax_", "test_modeling_")
+                    else:
+                        if job.job_name == "test_torch_and_tf":
+                            fn = fn.replace("test_modeling_", "test_modeling_tf_")
+                        elif job.job_name == "test_torch_and_flax":
+                            fn = fn.replace("test_modeling_", "test_modeling_flax_")
+                    new_test_file = str(os.path.join(dir_path, fn))
+                    if os.path.isfile(new_test_file):
+                        if new_test_file not in extended_tests_to_run:
+                            extended_tests_to_run.add(new_test_file)
+        extended_tests_to_run = sorted(extended_tests_to_run)
+        for job in jobs:
+            if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]:
+                job.tests_to_run = extended_tests_to_run
+        fn = "filtered_test_list_cross_tests.txt"
+        f_path = os.path.join(folder, fn)
+        with open(f_path, "w") as fp:
+            fp.write(" ".join(extended_tests_to_run))
+
     example_file = os.path.join(folder, "examples_test_list.txt")
     if os.path.exists(example_file) and os.path.getsize(example_file) > 0:
-        jobs.extend(EXAMPLES_TESTS)
+        with open(example_file, "r", encoding="utf-8") as f:
+            example_tests = f.read().split(" ")
+        for job in EXAMPLES_TESTS:
+            framework = job.name.replace("examples_", "").replace("torch", "pytorch")
+            if example_tests == "all":
+                job.tests_to_run = [f"examples/{framework}"]
+            else:
+                job.tests_to_run = [f for f in example_tests if f.startswith(f"examples/{framework}")]
+
+            if len(job.tests_to_run) > 0:
+                jobs.append(job)

-    if len(jobs) > 0:
-        config = {"version": "2.1"}
-        config["parameters"] = {"tests_to_run": {"type": "string", "default": test_list}}
-        config["jobs"] = {j.job_name: j.to_dict() for j in jobs}
-        config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
-        with open(os.path.join(folder, "generated_config.yml"), "w") as f:
-            f.write(yaml.dump(config, indent=2, width=1000000, sort_keys=False))
+    doctest_file = os.path.join(folder, "doctest_list.txt")
+    if os.path.exists(doctest_file):
+        with open(doctest_file) as f:
+            doctest_list = f.read()
+    else:
+        doctest_list = []
+    if len(doctest_list) > 0:
+        jobs.extend(DOC_TESTS)
+
+    repo_util_file = os.path.join(folder, "test_repo_utils.txt")
+    if os.path.exists(repo_util_file) and os.path.getsize(repo_util_file) > 0:
+        jobs.extend(REPO_UTIL_TESTS)
+
+    if len(jobs) == 0:
+        jobs = [EmptyJob()]
+    config = {"version": "2.1"}
+    config["parameters"] = {
+        # Only used to accept the parameters from the trigger
+        "nightly": {"type": "boolean", "default": False},
+        "tests_to_run": {"type": "string", "default": test_list},
+    }
+    config["jobs"] = {j.job_name: j.to_dict() for j in jobs}
+    config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
+    with open(os.path.join(folder, "generated_config.yml"), "w") as f:
+        f.write(yaml.dump(config, indent=2, width=1000000, sort_keys=False))


 if __name__ == "__main__":
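The cross-test extension above maps a changed modeling test onto its TF/Flax counterpart (and back), so the torch_and_tf / torch_and_flax jobs always see both sides of a pair. A self-contained sketch of the filename mapping, with an illustrative path:

    import os

    # Sketch: derive the cross-framework counterpart of a modeling test file.
    def cross_test_counterpart(test_path, target="tf"):
        dir_path, fn = os.path.split(test_path)
        if fn.startswith(("test_modeling_tf_", "test_modeling_flax_")):
            # framework-specific test -> base PyTorch test
            fn = fn.replace("test_modeling_tf_", "test_modeling_").replace("test_modeling_flax_", "test_modeling_")
        else:
            # base test -> framework-specific test
            fn = fn.replace("test_modeling_", f"test_modeling_{target}_")
        return os.path.join(dir_path, fn)

    print(cross_test_counterpart("tests/models/bert/test_modeling_bert.py"))
    # tests/models/bert/test_modeling_tf_bert.py

Unlike this helper, the real logic above only keeps mapped paths that actually exist on disk.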
.github/ISSUE_TEMPLATE/bug-report.yml (vendored, 75 changed lines)

@@ -17,58 +17,55 @@ body:
       description: |
         Your issue will be replied to more quickly if you can figure out the right person to tag with @
         If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
+
+        All issues are read by one of the core maintainers, so if you don't know who to tag, just leave this blank and
+        a core maintainer will ping the right person.
+
         Please tag fewer than 3 people.

         Models:
+
-        - ALBERT, BERT, XLM, DeBERTa, DeBERTa-v2, ELECTRA, MobileBert, SqueezeBert: `@LysandreJik`
-        - T5, Pegasus, EncoderDecoder: `@patrickvonplaten`
-        - Blenderbot, MBART, BART, Marian, Pegasus: `@patil-suraj`
-        - Reformer, TransfoXL, XLNet, FNet: `@patrickvonplaten`
-        - Longformer, BigBird: `@ydshieh`
-        - FSMT: `@stas00`
-        - Funnel: `@sgugger`
-        - GPT-2, GPT: `@patil-suraj`, `@patrickvonplaten`, `@LysandreJik`
-        - RAG, DPR: `@patrickvonplaten`, `@lhoestq`
-        - TensorFlow: `@Rocketknight1`
-        - JAX/Flax: `@patil-suraj`
-        - TAPAS, LayoutLM, LayoutLMv2, LUKE, ViT, BEiT, DEiT, DETR, CANINE: `@NielsRogge`
-        - GPT-Neo, GPT-J, CLIP: `@patil-suraj`
-        - Wav2Vec2, HuBERT, UniSpeech, UniSpeechSAT, SEW, SEW-D: `@patrickvonplaten`, `@anton-l`
-        - SpeechEncoderDecoder, Speech2Text, Speech2Text2: `@sanchit-gandhi`, `@patrickvonplaten`, `@anton-l`
-
-        If the model isn't in the list, ping `@LysandreJik` who will redirect you to the correct contributor.
+        - text models: @ArthurZucker and @younesbelkada
+        - vision models: @amyeroberts
+        - speech models: @sanchit-gandhi
+        - graph models: @clefourrier

         Library:
-        - Benchmarks: `@patrickvonplaten`
-        - Deepspeed: `@stas00`
-        - Ray/raytune: `@richardliaw`, `@amogkam`
-        - Text generation: `@patrickvonplaten`, `@Narsil`, `@gante`
-        - Tokenizers: `@SaulLu`
-        - Trainer: `@sgugger`
-        - Pipelines: `@Narsil`
-        - Speech: `@patrickvonplaten`, `@anton-l`, `@sanchit-gandhi`
-        - Vision: `@NielsRogge`, `@sgugger`
-
-        Documentation: `@sgugger`, `@stevhliu`
+
+        - flax: @sanchit-gandhi
+        - generate: @gante
+        - pipelines: @Narsil
+        - tensorflow: @gante and @Rocketknight1
+        - tokenizers: @ArthurZucker
+        - trainer: @sgugger
+
+        Integrations:
+
+        - deepspeed: HF Trainer/Accelerate: @pacman100
+        - ray/raytune: @richardliaw, @amogkam
+        - Big Model Inference: @sgugger @muellerzr
+
+        Documentation: @sgugger, @stevhliu and @MKhalusova

         Model hub:

         - for issues with a model, report at https://discuss.huggingface.co/ and tag the model's creator.

         HF projects:

+        - accelerate: [different repo](https://github.com/huggingface/accelerate)
         - datasets: [different repo](https://github.com/huggingface/datasets)
+        - diffusers: [different repo](https://github.com/huggingface/diffusers)
         - rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)

-        Examples:
-
-        - maintained examples (not research project or legacy): `@sgugger`, `@patil-suraj`
-
-        For research projetcs, please ping the contributor directly. For example, on the following projects:
-
-        - research_projects/bert-loses-patience: `@JetRunner`
-        - research_projects/distillation: `@VictorSanh`
+        Maintained examples (not research project or legacy):
+
+        - Flax: @sanchit-gandhi
+        - PyTorch: @sgugger
+        - TensorFlow: @Rocketknight1
+
+        Research projects are not maintained and should be taken as is.
+
       placeholder: "@Username ..."

   - type: checkboxes
.github/ISSUE_TEMPLATE/i18n.md (vendored, new file, 46 lines)

@@ -0,0 +1,46 @@
+---
+name: 🌐 Translating a new language?
+about: Start a new translation effort in your language
+title: '[i18n-<languageCode>] Translating docs to <languageName>'
+labels: WIP
+assignees: ''
+
+---
+
+<!--
+Note: Please search to see if an issue already exists for the language you are trying to translate.
+-->
+
+Hi!
+
+Let's bring the documentation to all the <languageName>-speaking community 🌐 (currently 0 out of 267 complete)
+
+Who would want to translate? Please follow the 🤗 [TRANSLATING guide](https://github.com/huggingface/transformers/blob/main/docs/TRANSLATING.md). Here is a list of the files ready for translation. Let us know in this issue if you'd like to translate any, and we'll add your name to the list.
+
+Some notes:
+
+* Please translate using an informal tone (imagine you are talking with a friend about transformers 🤗).
+* Please translate in a gender-neutral way.
+* Add your translations to the folder called `<languageCode>` inside the [source folder](https://github.com/huggingface/transformers/tree/main/docs/source).
+* Register your translation in `<languageCode>/_toctree.yml`; please follow the order of the [English version](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml).
+* Once you're finished, open a pull request and tag this issue by including #issue-number in the description, where issue-number is the number of this issue. Please ping @ArthurZucker, @sgugger for review.
+* 🙋 If you'd like others to help you with the translation, you can also post in the 🤗 [forums](https://discuss.huggingface.co/).
+
+## Get Started section
+
+- [ ] [index.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/index.md) https://github.com/huggingface/transformers/pull/20180
+- [ ] [quicktour.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/quicktour.md) (waiting for initial PR to go through)
+- [ ] [installation.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/installation.md).
+
+## Tutorial section
+- [ ] [pipeline_tutorial.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/pipeline_tutorial.md)
+- [ ] [autoclass_tutorial.md](https://github.com/huggingface/transformers/blob/master/docs/source/autoclass_tutorial.md)
+- [ ] [preprocessing.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/preprocessing.md)
+- [ ] [training.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/training.md)
+- [ ] [accelerate.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/accelerate.md)
+- [ ] [model_sharing.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/model_sharing.md)
+- [ ] [multilingual.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/multilingual.md)
+
+<!--
+Keep on adding more as you go 🔥
+-->
.github/PULL_REQUEST_TEMPLATE.md (vendored, 40 changed lines)

@@ -39,36 +39,38 @@ members/contributors who may be interested in your PR.

 Models:

-- albert, bert, xlm: @LysandreJik
-- blenderbot, bart, marian, pegasus, encoderdecoder, t5: @patrickvonplaten, @patil-suraj
-- longformer, reformer, transfoxl, xlnet: @patrickvonplaten
-- fsmt: @stas00
-- funnel: @sgugger
-- gpt2: @patrickvonplaten, @LysandreJik
-- rag: @patrickvonplaten, @lhoestq
-- tensorflow: @LysandreJik
+- text models: @ArthurZucker and @younesbelkada
+- vision models: @amyeroberts
+- speech models: @sanchit-gandhi
+- graph models: @clefourrier

 Library:

-- benchmarks: @patrickvonplaten
-- deepspeed: @stas00
-- ray/raytune: @richardliaw, @amogkam
-- text generation: @patrickvonplaten
-- tokenizers: @n1t0, @LysandreJik
+- flax: @sanchit-gandhi
+- generate: @gante
+- pipelines: @Narsil
+- tensorflow: @gante and @Rocketknight1
+- tokenizers: @ArthurZucker
 - trainer: @sgugger
-- pipelines: @LysandreJik

-Documentation: @sgugger
+Integrations:
+
+- deepspeed: HF Trainer/Accelerate: @pacman100
+- ray/raytune: @richardliaw, @amogkam
+
+Documentation: @sgugger, @stevhliu and @MKhalusova

 HF projects:

+- accelerate: [different repo](https://github.com/huggingface/accelerate)
 - datasets: [different repo](https://github.com/huggingface/datasets)
+- diffusers: [different repo](https://github.com/huggingface/diffusers)
 - rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)

-Examples:
+Maintained examples (not research project or legacy):

-- maintained examples (not research project or legacy): @sgugger, @patil-suraj
-- research_projects/bert-loses-patience: @JetRunner
-- research_projects/distillation: @VictorSanh
+- Flax: @sanchit-gandhi
+- PyTorch: @sgugger
+- TensorFlow: @Rocketknight1

 -->
.github/conda/meta.yaml (vendored, 2 changed lines)

@@ -16,7 +16,6 @@ requirements:
     - pip
    - numpy >=1.17
    - dataclasses
-    - importlib_metadata
    - huggingface_hub
    - packaging
    - filelock
@@ -31,7 +30,6 @@ requirements:
    - python
    - numpy >=1.17
    - dataclasses
-    - importlib_metadata
    - huggingface_hub
    - packaging
    - filelock
.github/workflows/add-model-like.yml (vendored, 4 changed lines)

@@ -16,7 +16,7 @@ jobs:
     name: "Add new model like template tests"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install dependencies
         run: |
@@ -74,7 +74,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: run_all_tests_new_models_test_reports
           path: reports/tests_new_models
.github/workflows/build-docker-images.yml (vendored, 161 changed lines)

@@ -3,7 +3,7 @@ name: Build docker images (scheduled)
 on:
   push:
     branches:
-      - docker-image*
+      - build_ci_docker_image*
   repository_dispatch:
   workflow_call:
     inputs:
@@ -11,7 +11,7 @@ on:
       required: true
       type: string
   schedule:
-    - cron: "0 1 * * *"
+    - cron: "17 0 * * *"

 concurrency:
   group: docker-images-builds
@@ -22,21 +22,31 @@ jobs:
     name: "Latest PyTorch + TensorFlow [dev]"
     runs-on: ubuntu-latest
     steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
         uses: actions/checkout@v3
       -
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-all-latest-gpu
           build-args: |
@@ -49,7 +59,7 @@ jobs:
         # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
         # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
         if: inputs.image_postfix != '-push-ci'
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-all-latest-gpu
           build-args: |
@@ -57,67 +67,76 @@ jobs:
           push: true
           tags: huggingface/transformers-all-latest-gpu-push-ci

-  latest-with-torch-nightly-docker:
-    name: "Nightly PyTorch + Stable TensorFlow"
-    # Push CI doesn't need this image
-    if: inputs.image_postfix != '-push-ci'
-    runs-on: ubuntu-latest
-    steps:
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      -
-        name: Check out code
-        uses: actions/checkout@v2
-      -
-        name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-      -
-        name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker/transformers-all-latest-gpu
-          build-args: |
-            REF=main
-            PYTORCH=pre
-          push: true
-          tags: huggingface/transformers-all-latest-torch-nightly-gpu
-
   latest-torch-deepspeed-docker:
     name: "Latest PyTorch + DeepSpeed"
     runs-on: ubuntu-latest
     steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-pytorch-deepspeed-latest-gpu
           build-args: |
             REF=main
           push: true
           tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}
+
+  # Can't build 2 images in a single job `latest-torch-deepspeed-docker` (for `nvcr.io/nvidia`)
+  latest-torch-deepspeed-docker-for-push-ci-daily-build:
+    name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      -
+        name: Check out code
+        uses: actions/checkout@v3
+      -
+        name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
       # Push CI images still need to be re-built daily
       -
         name: Build and push (for Push CI) in a daily basis
         # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
         # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
         if: inputs.image_postfix != '-push-ci'
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-pytorch-deepspeed-latest-gpu
           build-args: |
@@ -125,34 +144,6 @@ jobs:
           push: true
           tags: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci

-  nightly-torch-deepspeed-docker:
-    name: "Nightly PyTorch + DeepSpeed"
-    # Push CI doesn't need this image
-    if: inputs.image_postfix != '-push-ci'
-    runs-on: ubuntu-latest
-    steps:
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      -
-        name: Check out code
-        uses: actions/checkout@v2
-      -
-        name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-      -
-        name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker/transformers-pytorch-deepspeed-nightly-gpu
-          build-args: |
-            REF=main
-          push: true
-          tags: huggingface/transformers-pytorch-deepspeed-nightly-gpu
-
   doc-builder:
     name: "Doc builder"
     # Push CI doesn't need this image
@@ -161,19 +152,19 @@ jobs:
     steps:
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
        name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-doc-builder
           push: true
@@ -185,21 +176,31 @@ jobs:
     if: inputs.image_postfix != '-push-ci'
     runs-on: ubuntu-latest
     steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-pytorch-gpu
           build-args: |
@@ -215,19 +216,19 @@ jobs:
     steps:
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-tensorflow-gpu
           build-args: |
85 .github/workflows/build-nightly-ci-docker-images.yml vendored Normal file
@@ -0,0 +1,85 @@
+name: Build docker images (Nightly CI)
+
+on:
+  workflow_call:
+  push:
+    branches:
+      - build_nightly_ci_docker_image*
+
+concurrency:
+  group: docker-images-builds
+  cancel-in-progress: false
+
+jobs:
+  latest-with-torch-nightly-docker:
+    name: "Nightly PyTorch + Stable TensorFlow"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      -
+        name: Check out code
+        uses: actions/checkout@v3
+      -
+        name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+      -
+        name: Build and push
+        uses: docker/build-push-action@v3
+        with:
+          context: ./docker/transformers-all-latest-gpu
+          build-args: |
+            REF=main
+            PYTORCH=pre
+          push: true
+          tags: huggingface/transformers-all-latest-torch-nightly-gpu
+
+  nightly-torch-deepspeed-docker:
+    name: "Nightly PyTorch + DeepSpeed"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      -
+        name: Check out code
+        uses: actions/checkout@v3
+      -
+        name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+      -
+        name: Build and push
+        uses: docker/build-push-action@v3
+        with:
+          context: ./docker/transformers-pytorch-deepspeed-nightly-gpu
+          build-args: |
+            REF=main
+          push: true
+          tags: huggingface/transformers-pytorch-deepspeed-nightly-gpu
@@ -3,7 +3,7 @@ name: Build docker images (Past CI)
 on:
   push:
     branches:
-      - past-ci-docker-image*
+      - build_past_ci_docker_image*
 
 concurrency:
   group: docker-images-builds
@@ -15,28 +15,40 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        version: ["1.11", "1.10", "1.9", "1.8", "1.7", "1.6", "1.5", "1.4"]
+        version: ["1.13", "1.12", "1.11", "1.10"]
     runs-on: ubuntu-latest
     steps:
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
+      -
+        id: get-base-image
+        name: Get Base Image
+        env:
+          framework_version: ${{ matrix.version }}
+        run: |
+          echo "base_image=$(python3 -c 'import os; from utils.past_ci_versions import past_versions_testing; base_image = past_versions_testing["pytorch"][os.environ["framework_version"]]["base_image"]; print(base_image)')" >> $GITHUB_OUTPUT
+      -
+        name: Print Base Image
+        run: |
+          echo ${{ steps.get-base-image.outputs.base_image }}
       -
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-past-gpu
           build-args: |
             REF=main
+            BASE_DOCKER_IMAGE=${{ steps.get-base-image.outputs.base_image }}
             FRAMEWORK=pytorch
             VERSION=${{ matrix.version }}
           push: true
@@ -47,62 +59,41 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        version: ["2.8", "2.7", "2.6", "2.5"]
+        version: ["2.11", "2.10", "2.9", "2.8", "2.7", "2.6", "2.5"]
     runs-on: ubuntu-latest
     steps:
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       -
         name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
+      -
+        id: get-base-image
+        name: Get Base Image
+        env:
+          framework_version: ${{ matrix.version }}
+        run: |
+          echo "base_image=$(python3 -c 'import os; from utils.past_ci_versions import past_versions_testing; base_image = past_versions_testing["tensorflow"][os.environ["framework_version"]]["base_image"]; print(base_image)')" >> $GITHUB_OUTPUT
+      -
+        name: Print Base Image
+        run: |
+          echo ${{ steps.get-base-image.outputs.base_image }}
       -
         name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
       -
         name: Build and push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./docker/transformers-past-gpu
           build-args: |
             REF=main
+            BASE_DOCKER_IMAGE=${{ steps.get-base-image.outputs.base_image }}
             FRAMEWORK=tensorflow
             VERSION=${{ matrix.version }}
           push: true
           tags: huggingface/transformers-tensorflow-past-${{ matrix.version }}-gpu
-
-  past-tensorflow-docker-2-4:
-    name: "Past TensorFlow Docker"
-    strategy:
-      fail-fast: false
-      matrix:
-        version: ["2.4"]
-    runs-on: ubuntu-latest
-    steps:
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      -
-        name: Check out code
-        uses: actions/checkout@v2
-      -
-        name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-      -
-        name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker/transformers-past-gpu
-          build-args: |
-            REF=main
-            BASE_DOCKER_IMAGE=nvidia/cuda:11.0.3-cudnn8-devel-ubuntu20.04
-            FRAMEWORK=tensorflow
-            VERSION=${{ matrix.version }}
-          push: true
-          tags: huggingface/transformers-tensorflow-past-${{ matrix.version }}-gpu
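
The `Get Base Image` steps added above shell out to a Python one-liner that reads `utils/past_ci_versions.py`. A minimal sketch of that lookup, assuming a nested mapping of roughly this shape (the concrete image tags below are illustrative placeholders, not values copied from the repo):

import os

# Hypothetical shape of past_versions_testing in utils/past_ci_versions.py;
# the workflow only reads the "base_image" field for the matrix version.
past_versions_testing = {
    "pytorch": {
        "1.13": {"base_image": "nvcr.io/nvidia/pytorch:22.11-py3"},  # illustrative tag
        "1.12": {"base_image": "nvcr.io/nvidia/pytorch:22.08-py3"},  # illustrative tag
    },
    "tensorflow": {
        "2.11": {"base_image": "nvcr.io/nvidia/tensorflow:22.12-tf2-py3"},  # illustrative tag
    },
}

# What the step's one-liner does, minus the shell wrapper:
framework = "pytorch"
framework_version = os.environ.get("framework_version", "1.13")
base_image = past_versions_testing[framework][framework_version]["base_image"]
print(base_image)  # echoed into $GITHUB_OUTPUT as base_image=<value>

The step then exposes the value to later steps as `steps.get-base-image.outputs.base_image`, which feeds the `BASE_DOCKER_IMAGE` build arg.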
3 .github/workflows/build_documentation.yml vendored
@@ -15,6 +15,7 @@ jobs:
       commit_sha: ${{ github.sha }}
       package: transformers
       notebook_folder: transformers_doc
-      languages: de en es it pt
+      languages: de en es fr it ko pt zh
     secrets:
       token: ${{ secrets.HUGGINGFACE_PUSH }}
+      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
2 .github/workflows/build_pr_documentation.yml vendored
@@ -14,4 +14,4 @@ jobs:
       commit_sha: ${{ github.event.pull_request.head.sha }}
       pr_number: ${{ github.event.number }}
       package: transformers
-      languages: de en es it pt
+      languages: de en es fr it ko pt zh
9 .github/workflows/check_runner_status.yml vendored
@@ -23,7 +23,7 @@ jobs:
       offline_runners: ${{ steps.set-offline_runners.outputs.offline_runners }}
     steps:
       - name: Checkout transformers
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2
 
@@ -35,7 +35,7 @@ jobs:
         if: ${{ always() }}
         run: |
           offline_runners=$(python3 -c 'fp = open("offline_runners.txt"); failed = fp.read(); fp.close(); print(failed)')
-          echo "::set-output name=offline_runners::$offline_runners"
+          echo "offline_runners=$offline_runners" >> $GITHUB_OUTPUT
 
   send_results:
     name: Send results to webhook
@@ -48,8 +48,8 @@ jobs:
         run: |
           echo "Runner availability: ${{ needs.check_runner_status.result }}"
 
-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
+      - uses: actions/checkout@v3
+      - uses: actions/download-artifact@v3
       - name: Send message to Slack
         env:
           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
@@ -57,6 +57,7 @@ jobs:
           CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
+          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           CI_EVENT: runner status check
           RUNNER_STATUS: ${{ needs.check_runner_status.result }}
           OFFLINE_RUNNERS: ${{ needs.check_runner_status.outputs.offline_runners }}
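
The hunk above is one of many in this range that migrate from the deprecated `::set-output` workflow command to appending to the `$GITHUB_OUTPUT` file. A minimal sketch of the same mechanism driven from Python rather than `echo` (the output name and value are illustrative; this only runs inside a GitHub Actions job, where `GITHUB_OUTPUT` is defined):

import os

def set_step_output(name: str, value: str) -> None:
    # Deprecated mechanism: print a workflow command to stdout, e.g.
    #   print(f"::set-output name={name}::{value}")
    # Current mechanism: append "name=value" to the file whose path
    # GitHub Actions provides in the GITHUB_OUTPUT environment variable.
    with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh:
        fh.write(f"{name}={value}\n")

set_step_output("offline_runners", '["runner-1", "runner-2"]')  # illustrative value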
82 .github/workflows/check_tiny_models.yml vendored Normal file
@@ -0,0 +1,82 @@
+name: Check Tiny Models
+
+on:
+  push:
+    branches:
+      - check_tiny_models*
+  repository_dispatch:
+  schedule:
+    - cron: "0 2 * * *"
+
+env:
+  TOKEN: ${{ secrets.TRANSFORMERS_HUB_BOT_HF_TOKEN }}
+
+jobs:
+  check_tiny_models:
+    name: Check tiny models
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout transformers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - uses: actions/checkout@v3
+      - name: Set up Python 3.8
+        uses: actions/setup-python@v4
+        with:
+          # Semantic version range syntax or exact version of a Python version
+          python-version: '3.8'
+          # Optional - x64 or x86 architecture, defaults to x64
+          architecture: 'x64'
+
+      - name: Install
+        run: |
+          sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng cmake
+          pip install --upgrade pip
+          python -m pip install -U .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm,video,tf-cpu]
+          pip install tensorflow_probability
+          python -m pip install -U natten
+
+      - name: Create all tiny models (locally)
+        run: |
+          python utils/create_dummy_models.py tiny_local_models --all --num_workers 2
+
+      - name: Local tiny model reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: tiny_local_model_creation_reports
+          path: tiny_local_models/reports
+
+      # GitHub-hosted runners have 2-core CPUs
+      - name: Run pipeline tests against all new (local) tiny models
+        run: |
+          OMP_NUM_THREADS=1 TRANSFORMERS_TINY_MODEL_PATH=tiny_local_models python -m pytest --max-worker-restart=0 -n 2 --dist=loadfile -s -rA --make-reports=tests_pipelines tests/models -m is_pipeline_test -k "test_pipeline_" | tee tests_output.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: tiny_local_model_creation_reports
+          path: reports/tests_pipelines
+
+      - name: Create + Upload tiny models for new model architecture(s)
+        run: |
+          python utils/update_tiny_models.py --num_workers 2
+
+      - name: Full report
+        run: cat tiny_models/reports/tiny_model_creation_report.json
+
+      - name: Failure report
+        run: cat tiny_models/reports/simple_failed_report.txt
+
+      - name: Summary report
+        run: cat tiny_models/reports/tiny_model_summary.json
+
+      - name: New tiny model creation reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: tiny_model_creation_reports
+          path: tiny_models/reports
13 .github/workflows/delete_doc_comment.yml vendored
@@ -1,13 +1,14 @@
-name: Delete dev documentation
+name: Delete doc comment
 
 on:
-  pull_request:
-    types: [ closed ]
+  workflow_run:
+    workflows: ["Delete doc comment trigger"]
+    types:
+      - completed
 
 
 jobs:
   delete:
     uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
-    with:
-      pr_number: ${{ github.event.number }}
-      package: transformers
+    secrets:
+      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
12 .github/workflows/delete_doc_comment_trigger.yml vendored Normal file
@@ -0,0 +1,12 @@
+name: Delete doc comment trigger
+
+on:
+  pull_request:
+    types: [ closed ]
+
+
+jobs:
+  delete:
+    uses: huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main
+    with:
+      pr_number: ${{ github.event.number }}
27 .github/workflows/doctests.yml vendored
@@ -6,7 +6,7 @@ on:
       - doctest*
   repository_dispatch:
   schedule:
-    - cron: "0 0 * * *"
+    - cron: "17 2 * * *"
 
 
 env:
@@ -25,26 +25,27 @@ jobs:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-      - uses: actions/checkout@v2
+      - name: uninstall transformers (installed during docker image build)
+        run: python3 -m pip uninstall -y transformers
+
+      - uses: actions/checkout@v3
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
+
+      - name: Install transformers in edit mode
+        run: python3 -m pip install -e .
+
       - name: GPU visibility
         run: |
           python3 utils/print_env.py
 
-      - name: Prepare files for doctests
-        run: |
-          python3 utils/prepare_for_doc_test.py src docs
+      - name: Show installed libraries and their versions
+        run: pip freeze
 
       - name: Run doctests
         run: |
-          python3 -m pytest -v --make-reports doc_tests_gpu --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure --doctest-glob="*.mdx"
+          python3 -m pytest -v --make-reports doc_tests_gpu --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure --doctest-glob="*.md"
 
-      - name: Clean files after doctests
-        run: |
-          python3 utils/prepare_for_doc_test.py src docs --remove_new_line
-
       - name: Failure short reports
         if: ${{ failure() }}
@@ -53,7 +54,7 @@ jobs:
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: doc_tests_gpu_test_reports
           path: reports/doc_tests_gpu
@@ -65,8 +66,8 @@ jobs:
     if: always()
     needs: [run_doctests]
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
+      - uses: actions/checkout@v3
+      - uses: actions/download-artifact@v3
       - name: Send message to Slack
         env:
           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
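
For context on what the doctest job collects: with `--doctest-modules` and `--doctest-glob`, pytest executes interpreter-style examples embedded in docstrings and documentation files. A generic, self-contained illustration of such an example (not taken from the repo):

def add(a: int, b: int) -> int:
    """Add two integers.

    >>> add(1, 2)
    3
    """
    return a + b

if __name__ == "__main__":
    # Run the embedded examples directly; pytest does the equivalent
    # for every module listed in utils/documentation_tests.txt.
    import doctest
    doctest.testmod(verbose=True)

The `--doctest-glob` change from `*.mdx` to `*.md` tracks the documentation's migration from MDX to plain Markdown files.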
4 .github/workflows/model-templates.yml vendored
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Install dependencies
         run: |
@@ -75,7 +75,7 @@ jobs:
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: run_all_tests_templates_test_reports
           path: reports/tests_templates
145 .github/workflows/self-nightly-past-ci-caller.yml vendored Normal file
@@ -0,0 +1,145 @@
+name: Self-hosted runner (nightly-past-ci-caller)
+
+on:
+  schedule:
+    # 2:17 am on each Sunday and Thursday
+
+    - cron: "17 2 * * 0,4"
+  push:
+    branches:
+      - run_nightly_ci*
+      - run_past_ci*
+
+jobs:
+  build_nightly_ci_images:
+    name: Build Nightly CI Docker Images
+    if: (github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_nightly_ci'))
+    uses: ./.github/workflows/build-nightly-ci-docker-images.yml
+    secrets: inherit
+
+  run_nightly_ci:
+    name: Nightly CI
+    needs: [build_nightly_ci_images]
+    uses: ./.github/workflows/self-nightly-scheduled.yml
+    secrets: inherit
+
+  run_past_ci_pytorch_1-13:
+    name: PyTorch 1.13
+    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    needs: [run_nightly_ci]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: pytorch
+      version: "1.13"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_pytorch_1-12:
+    name: PyTorch 1.12
+    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    needs: [run_past_ci_pytorch_1-13]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: pytorch
+      version: "1.12"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_pytorch_1-11:
+    name: PyTorch 1.11
+    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    needs: [run_past_ci_pytorch_1-12]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: pytorch
+      version: "1.11"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_pytorch_1-10:
+    name: PyTorch 1.10
+    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    needs: [run_past_ci_pytorch_1-11]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: pytorch
+      version: "1.10"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-11:
+    name: TensorFlow 2.11
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_pytorch_1-10]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.11"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-10:
+    name: TensorFlow 2.10
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_tensorflow_2-11]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.10"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-9:
+    name: TensorFlow 2.9
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_tensorflow_2-10]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.9"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-8:
+    name: TensorFlow 2.8
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_tensorflow_2-9]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.8"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-7:
+    name: TensorFlow 2.7
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_tensorflow_2-8]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.7"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-6:
+    name: TensorFlow 2.6
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_tensorflow_2-7]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.6"
+      sha: ${{ github.sha }}
+    secrets: inherit
+
+  run_past_ci_tensorflow_2-5:
+    name: TensorFlow 2.5
+    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    needs: [run_past_ci_tensorflow_2-6]
+    uses: ./.github/workflows/self-past.yml
+    with:
+      framework: tensorflow
+      version: "2.5"
+      sha: ${{ github.sha }}
+    secrets: inherit
80 .github/workflows/self-nightly-scheduled.yml vendored
@@ -1,4 +1,4 @@
-name: Self-hosted runner (nightly)
+name: Self-hosted runner (nightly-ci)
 
 # Note that each job's dependencies go into a corresponding docker file.
 #
@@ -8,9 +8,7 @@ name: Self-hosted runner (nightly)
 
 on:
   repository_dispatch:
-  # Disable temporarily until the test suite can be run under 12 hours.
-  # schedule:
-  #   - cron: "0 16 * * *"
+  workflow_call:
 
 env:
   HF_HOME: /mnt/cache
@@ -28,12 +26,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout transformers
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
          fetch-depth: 2
 
       - name: Check Runner Status
-        run: python utils/check_self_hosted_runner.py --target_runners single-gpu-scheduled-ci-runner-docker,multi-gpu-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
+        run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
 
   check_runners:
     name: Check Runners
@@ -41,7 +39,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -56,7 +54,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -75,11 +73,15 @@ jobs:
           rm -rf tests/models/__pycache__
           rm -rf reports
 
+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - id: set-matrix
         name: Identify models to test
         working-directory: /transformers/tests
         run: |
-          echo "::set-output name=matrix::$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')"
+          echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT
 
       - name: NVIDIA-SMI
         run: |
@@ -92,7 +94,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [single-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -113,6 +115,10 @@ jobs:
         working-directory: /transformers
         run: git fetch && git checkout ${{ github.sha }}
 
+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -122,6 +128,10 @@ jobs:
         run: |
           python3 utils/print_env.py
 
+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all tests on GPU
         working-directory: /transformers
         run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
@@ -133,9 +143,9 @@ jobs:
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
+          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly
           path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
 
   run_tests_multi_gpu:
@@ -145,7 +155,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.matrix) }}
         machine_type: [multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
     container:
       image: huggingface/transformers-all-latest-torch-nightly-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -166,6 +176,10 @@ jobs:
         working-directory: /transformers
         run: git fetch && git checkout ${{ github.sha }}
 
+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -175,6 +189,10 @@ jobs:
         run: |
           python3 utils/print_env.py
 
+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all tests on GPU
         working-directory: /transformers
         run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
@@ -186,9 +204,9 @@ jobs:
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
+          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly
           path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
 
   run_all_tests_torch_cuda_extensions_gpu:
@@ -197,7 +215,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
     needs: setup
     container:
       image: huggingface/transformers-pytorch-deepspeed-nightly-gpu
@@ -207,6 +225,10 @@ jobs:
         working-directory: /workspace/transformers
         run: git fetch && git checkout ${{ github.sha }}
 
+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /workspace/transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: Remove cached torch extensions
         run: rm -rf /github/home/.cache/torch_extensions/
 
@@ -217,7 +239,7 @@ jobs:
           python3 -m pip uninstall -y deepspeed
           rm -rf DeepSpeed
           git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
-          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
+          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
 
       - name: NVIDIA-SMI
         run: |
@@ -228,6 +250,10 @@ jobs:
         run: |
           python utils/print_env.py
 
+      - name: Show installed libraries and their versions
+        working-directory: /workspace/transformers
+        run: pip freeze
+
       - name: Run all tests on GPU
         working-directory: /workspace/transformers
         run: |
@@ -240,9 +266,9 @@ jobs:
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports
+          name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports_postfix_nightly
           path: /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu
 
   send_results:
@@ -266,8 +292,8 @@ jobs:
           echo "Runner status: ${{ needs.check_runners.result }}"
           echo "Setup status: ${{ needs.setup.result }}"
 
-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
+      - uses: actions/checkout@v3
+      - uses: actions/download-artifact@v3
       - name: Send message to Slack
         env:
           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
@@ -275,7 +301,8 @@ jobs:
           CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
-          CI_EVENT: nightly-build
+          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
+          CI_EVENT: Nightly CI
           RUNNER_STATUS: ${{ needs.check_runner_status.result }}
           RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
           SETUP_STATUS: ${{ needs.setup.result }}
@@ -283,4 +310,13 @@ jobs:
         # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
         run: |
           pip install slack_sdk
+          pip show slack_sdk
           python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
+
+
+      # delete-artifact
+      - uses: geekyeggo/delete-artifact@v2
+        with:
+          name: |
+            single-*
+            multi-*
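
The `Identify models to test` step above packs the test-folder discovery into a `python3 -c` one-liner. Unrolled into readable Python, the same logic looks roughly like this (a sketch assuming it runs from the repository's `tests/` directory, as the step's `working-directory` arranges):

import os

# Per-model test folders (models/<name>) come first, followed by the
# remaining top-level test directories; "models" itself is dropped in
# favor of its expanded children. The printed list becomes the job matrix.
tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")
print(d2 + d1)  # echoed into $GITHUB_OUTPUT as matrix=<list>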
136 .github/workflows/self-past-caller.yml vendored
@@ -1,136 +0,0 @@
-name: Self-hosted runner (past-ci-caller)
-
-on:
-  push:
-    branches:
-      - run-past-ci*
-
-jobs:
-  run_past_ci_pytorch_1-11:
-    name: PyTorch 1.11
-    if: always()
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.11"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-10:
-    name: PyTorch 1.10
-    if: always()
-    needs: [run_past_ci_pytorch_1-11]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.10"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-9:
-    name: PyTorch 1.9
-    if: always()
-    needs: [run_past_ci_pytorch_1-10]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.9"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-8:
-    name: PyTorch 1.8
-    if: always()
-    needs: [run_past_ci_pytorch_1-9]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.8"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-7:
-    name: PyTorch 1.7
-    if: always()
-    needs: [run_past_ci_pytorch_1-8]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.7"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-6:
-    name: PyTorch 1.6
-    if: always()
-    needs: [run_past_ci_pytorch_1-7]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.6"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-5:
-    name: PyTorch 1.5
-    if: always()
-    needs: [run_past_ci_pytorch_1-6]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.5"
-    secrets: inherit
-
-  run_past_ci_pytorch_1-4:
-    name: PyTorch 1.4
-    if: always()
-    needs: [run_past_ci_pytorch_1-5]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.4"
-    secrets: inherit
-
-  run_past_ci_tensorflow_2-8:
-    name: TensorFlow 2.8
-    if: always()
-    needs: [run_past_ci_pytorch_1-4]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: tensorflow
-      version: "2.8"
-    secrets: inherit
-
-  run_past_ci_tensorflow_2-7:
-    name: TensorFlow 2.7
-    if: always()
-    needs: [run_past_ci_tensorflow_2-8]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: tensorflow
-      version: "2.7"
-    secrets: inherit
-
-  run_past_ci_tensorflow_2-6:
-    name: TensorFlow 2.6
-    if: always()
-    needs: [run_past_ci_tensorflow_2-7]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: tensorflow
-      version: "2.6"
-    secrets: inherit
-
-  run_past_ci_tensorflow_2-5:
-    name: TensorFlow 2.5
-    if: always()
-    needs: [run_past_ci_tensorflow_2-6]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: tensorflow
-      version: "2.5"
-    secrets: inherit
-
-  run_past_ci_tensorflow_2-4:
-    name: TensorFlow 2.4
-    if: always()
-    needs: [run_past_ci_tensorflow_2-5]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: tensorflow
-      version: "2.4"
-    secrets: inherit
140
.github/workflows/self-past.yml
vendored
140
.github/workflows/self-past.yml
vendored
@ -1,4 +1,4 @@
|
|||||||
name: Self-hosted runner (past)
|
name: Self-hosted runner (past-ci)
|
||||||
|
|
||||||
# Note that each job's dependencies go into a corresponding docker file.
|
# Note that each job's dependencies go into a corresponding docker file.
|
||||||
#
|
#
|
||||||
@ -37,7 +37,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout transformers
|
- name: Checkout transformers
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
|
|
||||||
@ -83,12 +83,16 @@ jobs:
|
|||||||
rm -rf tests/models/__pycache__
|
rm -rf tests/models/__pycache__
|
||||||
rm -rf reports
|
rm -rf reports
|
||||||
|
|
||||||
|
- name: Show installed libraries and their versions
|
||||||
|
working-directory: /transformers
|
||||||
|
run: pip freeze
|
||||||
|
|
||||||
- id: set-matrix
|
- id: set-matrix
|
||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
name: Identify models to test
|
name: Identify models to test
|
||||||
run: |
|
run: |
|
||||||
cd tests
|
cd tests
|
||||||
echo "::set-output name=matrix::$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')"
|
echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
run_tests_single_gpu:
|
run_tests_single_gpu:
|
||||||
name: Model tests
|
name: Model tests
|
||||||
@ -107,6 +111,10 @@ jobs:
|
|||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
run: git fetch && git checkout ${{ inputs.sha }}
|
run: git fetch && git checkout ${{ inputs.sha }}
|
||||||
|
|
||||||
|
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||||
|
working-directory: /transformers
|
||||||
|
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||||
|
|
||||||
- name: Echo folder ${{ matrix.folders }}
|
- name: Echo folder ${{ matrix.folders }}
|
||||||
shell: bash
|
shell: bash
|
||||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||||
@ -122,11 +130,21 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
nvidia-smi
|
nvidia-smi
|
||||||
|
|
||||||
|
- name: Install
|
||||||
|
if: inputs.framework == 'pytorch'
|
||||||
|
working-directory: /transformers
|
||||||
|
run: |
|
||||||
|
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
run: |
|
run: |
|
||||||
python3 utils/print_env.py
|
python3 utils/print_env.py
|
||||||
|
|
||||||
|
- name: Show installed libraries and their versions
|
||||||
|
working-directory: /transformers
|
||||||
|
run: pip freeze
|
||||||
|
|
||||||
- name: Run all tests on GPU
|
- name: Run all tests on GPU
|
||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
||||||
@ -147,9 +165,9 @@ jobs:
|
|||||||
|
|
||||||
- name: Test suite reports artifacts
|
- name: Test suite reports artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
|
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
|
||||||
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
||||||
|
|
||||||
run_tests_multi_gpu:
|
run_tests_multi_gpu:
|
||||||
@ -169,6 +187,10 @@ jobs:
|
|||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
run: git fetch && git checkout ${{ inputs.sha }}
|
run: git fetch && git checkout ${{ inputs.sha }}
|
||||||
|
|
||||||
|
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||||
|
working-directory: /transformers
|
||||||
|
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||||
|
|
||||||
- name: Echo folder ${{ matrix.folders }}
|
- name: Echo folder ${{ matrix.folders }}
|
||||||
shell: bash
|
shell: bash
|
||||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||||
@ -184,11 +206,21 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
nvidia-smi
|
nvidia-smi
|
||||||
|
|
||||||
|
- name: Install
|
||||||
|
if: inputs.framework == 'pytorch'
|
||||||
|
working-directory: /transformers
|
||||||
|
run: |
|
||||||
|
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||||
|
|
||||||
- name: Environment
|
- name: Environment
|
||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
run: |
|
run: |
|
||||||
python3 utils/print_env.py
|
python3 utils/print_env.py
|
||||||
|
|
||||||
|
- name: Show installed libraries and their versions
|
||||||
|
working-directory: /transformers
|
||||||
|
run: pip freeze
|
||||||
|
|
||||||
- name: Run all tests on GPU
|
- name: Run all tests on GPU
|
||||||
working-directory: /transformers
|
working-directory: /transformers
|
||||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
||||||
@ -209,16 +241,91 @@ jobs:
|
|||||||
|
|
||||||
- name: Test suite reports artifacts
|
- name: Test suite reports artifacts
|
||||||
if: ${{ always() }}
|
if: ${{ always() }}
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
|
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
|
||||||
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
||||||

+  run_all_tests_torch_cuda_extensions_gpu:
+    name: Torch CUDA extension tests
+    if: inputs.framework == 'pytorch'
+    strategy:
+      fail-fast: false
+      matrix:
+        machine_type: [single-gpu, multi-gpu]
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }}
+    needs: setup
+    container:
+      image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+    steps:
+      - name: Update clone
+        working-directory: /transformers
+        run: git fetch && git checkout ${{ github.sha }}
+
+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
+      - name: Install
+        working-directory: /transformers
+        run: |
+          python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
+
+      - name: Remove cached torch extensions
+        run: rm -rf /github/home/.cache/torch_extensions/
+
+      # To avoid unknown test failures
+      - name: Pre build DeepSpeed *again*
+        working-directory: /
+        run: |
+          python3 -m pip uninstall -y deepspeed
+          rm -rf DeepSpeed
+          git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
+          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
+
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+
+      - name: Environment
+        working-directory: /transformers
+        run: |
+          python3 utils/print_env.py
+
+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
+      - name: Run all tests on GPU
+        working-directory: /transformers
+        run: |
+          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu/failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
+          path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu

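The `Pre build DeepSpeed *again*` step above compiles the CPU Adam, fused Adam, and utils ops ahead of time (the `DS_BUILD_*=1` flags) so the test run doesn't JIT-compile them, which is what the "unknown test failures" comment alludes to. As a hedged aside, DeepSpeed ships its own environment report that can confirm which ops got prebuilt:

```bash
# Optional sanity check after such a prebuild; ds_report is DeepSpeed's env-report CLI
# and lists, per op (cpu_adam, fused_adam, utils, ...), whether it is installed/compatible.
ds_report
```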
   send_results:
     name: Send results to webhook
     runs-on: ubuntu-latest
     if: always()
-    needs: [check_runner_status, check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu]
+    needs: [
+      check_runner_status,
+      check_runners,
+      setup,
+      run_tests_single_gpu,
+      run_tests_multi_gpu,
+      run_all_tests_torch_cuda_extensions_gpu
+    ]
     steps:
       - name: Preliminary job status
         shell: bash
@@ -228,8 +335,8 @@ jobs:
           echo "Runner status: ${{ needs.check_runners.result }}"
           echo "Setup status: ${{ needs.setup.result }}"

-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
+      - uses: actions/checkout@v3
+      - uses: actions/download-artifact@v3

       # Create a directory to store test failure tables in the next step
       - name: Create directory
@@ -242,6 +349,7 @@ jobs:
           CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
+          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
           RUNNER_STATUS: ${{ needs.check_runner_status.result }}
           RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
@@ -250,12 +358,20 @@ jobs:
         # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
         run: |
           pip install slack_sdk
+          pip show slack_sdk
           python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"

       # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
       - name: Failure table artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: test_failure_tables_${{ inputs.framework }}-${{ inputs.version }}
           path: test_failure_tables

+      # delete-artifact
+      - uses: geekyeggo/delete-artifact@v2
+        with:
+          name: |
+            single-*
+            multi-*
.github/workflows/self-push-caller.yml (vendored, 2 changes)
@@ -32,7 +32,7 @@ jobs:
         run: |
           for file in ${{ steps.changed-files.outputs.all_changed_files }}; do
             if [ `basename "${file}"` = "setup.py" ]; then
-              echo ::set-output name=changed::"1"
+              echo "changed=1" >> $GITHUB_OUTPUT
             fi
           done
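This hunk (and the similar ones below) migrates from the `::set-output` workflow command, which GitHub Actions has deprecated, to appending `key=value` pairs to the file exposed as `$GITHUB_OUTPUT`. The two forms are equivalent:

```bash
# Old, deprecated workflow command:
echo ::set-output name=changed::"1"
# New form: append key=value to the step's output file.
echo "changed=1" >> "$GITHUB_OUTPUT"
```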
.github/workflows/self-push.yml (vendored, 62 changes)
@@ -32,7 +32,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout transformers
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2

@@ -112,6 +112,10 @@ jobs:
           rm -rf tests/models/__pycache__
           rm -rf reports

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Fetch the tests to run
         working-directory: /transformers
         # TODO: add `git-python` in the docker images
@@ -120,7 +124,7 @@ jobs:
           python3 utils/tests_fetcher.py --diff_with_last_commit | tee test_preparation.txt

       - name: Report fetched tests
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: test_fetched
           path: /transformers/test_preparation.txt
@@ -141,8 +145,8 @@ jobs:
           fi
           echo $keys
           echo $test_map
-          echo "::set-output name=matrix::$keys"
-          echo "::set-output name=test_map::$test_map"
+          echo "matrix=$keys" >> $GITHUB_OUTPUT
+          echo "test_map=$test_map" >> $GITHUB_OUTPUT

   run_tests_single_gpu:
     name: Model tests
@@ -191,6 +195,10 @@ jobs:
           git checkout ${{ env.CI_SHA }}
           echo "log = $(git log -n 1)"

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: Echo folder ${{ matrix.folders }}
         shell: bash
         # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
@@ -212,6 +220,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all non-slow selected tests on GPU
         working-directory: /transformers
         run: |
@@ -224,7 +236,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
           path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
@@ -276,6 +288,10 @@ jobs:
           git checkout ${{ env.CI_SHA }}
           echo "log = $(git log -n 1)"

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: Echo folder ${{ matrix.folders }}
         shell: bash
         # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
@@ -297,6 +313,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all non-slow selected tests on GPU
         env:
           MKL_SERVICE_FORCE_INTEL: 1
@@ -311,7 +331,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
           path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
@@ -361,6 +381,10 @@ jobs:
           git checkout ${{ env.CI_SHA }}
           echo "log = $(git log -n 1)"

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /workspace/transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: Remove cached torch extensions
         run: rm -rf /github/home/.cache/torch_extensions/

@@ -369,7 +393,7 @@ jobs:
         working-directory: /workspace
         run: |
           python3 -m pip uninstall -y deepspeed
-          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
+          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

       - name: NVIDIA-SMI
         run: |
@@ -380,6 +404,10 @@ jobs:
         run: |
           python utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /workspace/transformers
+        run: pip freeze
+
       - name: Run all non-slow selected tests on GPU
         working-directory: /workspace/transformers
         # TODO: Here we pass all tests in the 2 folders for simplicity. It's better to pass only the identified tests.
@@ -393,7 +421,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports
           path: /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu
@@ -443,6 +471,10 @@ jobs:
           git checkout ${{ env.CI_SHA }}
           echo "log = $(git log -n 1)"

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /workspace/transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: Remove cached torch extensions
         run: rm -rf /github/home/.cache/torch_extensions/

@@ -451,7 +483,7 @@ jobs:
         working-directory: /workspace
         run: |
           python3 -m pip uninstall -y deepspeed
-          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
+          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

       - name: NVIDIA-SMI
         run: |
@@ -462,6 +494,10 @@ jobs:
         run: |
           python utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /workspace/transformers
+        run: pip freeze
+
       - name: Run all non-slow selected tests on GPU
         working-directory: /workspace/transformers
         # TODO: Here we pass all tests in the 2 folders for simplicity. It's better to pass only the identified tests.
@@ -475,7 +511,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports
           path: /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu
@@ -525,7 +561,7 @@ jobs:
           echo "env.CI_BRANCH = ${{ env.CI_BRANCH }}"
           echo "env.CI_SHA = ${{ env.CI_SHA }}"

-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # To avoid failure when multiple commits are merged into `main` in a short period of time.
       # Checking out to an old commit beyond the fetch depth will get an error `fatal: reference is not a tree: ...
       # (Only required for `workflow_run` event, where we get the latest HEAD on `main` instead of the event commit)
@@ -540,7 +576,7 @@ jobs:
           git checkout ${{ env.CI_SHA }}
           echo "log = $(git log -n 1)"

-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
       - name: Send message to Slack
         env:
           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
@@ -548,6 +584,7 @@ jobs:
           CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
+          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           CI_EVENT: push
           CI_TITLE_PUSH: ${{ github.event.head_commit.message }}
           CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }}
@@ -560,4 +597,5 @@ jobs:
         # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
         run: |
           pip install slack_sdk
+          pip show slack_sdk
           python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
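On the `fetch-depth: 2` / `fatal: reference is not a tree` comments in this file: for `workflow_run` events the checkout gets the latest HEAD of `main`, so the event's own commit can be missing from the shallow clone. A hedged sketch of the usual recovery, not the literal commands in this workflow:

```bash
# If checking out the event commit fails because the shallow clone is too old,
# deepen the history until the commit is reachable, then retry.
git fetch --deepen=50 origin main   # or: git fetch --unshallow
git checkout "$CI_SHA"
```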
.github/workflows/self-scheduled.yml (vendored, 161 changes)
@@ -9,7 +9,10 @@ name: Self-hosted runner (scheduled)
 on:
   repository_dispatch:
   schedule:
-    - cron: "0 2 * * *"
+    - cron: "17 2 * * *"
+  push:
+    branches:
+      - run_scheduled_ci*

 env:
   HF_HOME: /mnt/cache
@@ -27,7 +30,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout transformers
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2

@@ -74,11 +77,15 @@ jobs:
           rm -rf tests/models/__pycache__
           rm -rf reports

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - id: set-matrix
         name: Identify models to test
         working-directory: /transformers/tests
         run: |
-          echo "::set-output name=matrix::$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')"
+          echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT
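For readability, here is the matrix-building one-liner above expanded (run from `/transformers/tests`); it collects model test folders first, then the remaining top-level test folders, and prints the list that feeds the job matrix:

```bash
python3 - <<'PY'
import os

tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")  # `models` itself is covered by its subfolders in d2
print(d2 + d1)
PY
```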

       - name: NVIDIA-SMI
         run: |
@@ -112,6 +119,10 @@ jobs:
         working-directory: /transformers
         run: git fetch && git checkout ${{ github.sha }}

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -121,6 +132,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all tests on GPU
         working-directory: /transformers
         run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
@@ -132,7 +147,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
           path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
@@ -165,6 +180,10 @@ jobs:
         working-directory: /transformers
         run: git fetch && git checkout ${{ github.sha }}

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -174,6 +193,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all tests on GPU
         working-directory: /transformers
         run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
@@ -185,14 +208,18 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
           path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

   run_examples_gpu:
     name: Examples directory
-    runs-on: [self-hosted, single-gpu-docker]
+    strategy:
+      fail-fast: false
+      matrix:
+        machine_type: [single-gpu]
+    runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }}
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -202,6 +229,10 @@ jobs:
         working-directory: /transformers
         run: git fetch && git checkout ${{ github.sha }}

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -211,23 +242,27 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run examples tests on GPU
         working-directory: /transformers
         run: |
           pip install -r examples/pytorch/_tests_requirements.txt
-          python3 -m pytest -v --make-reports=single-gpu_examples_gpu examples/pytorch
+          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_examples_gpu examples/pytorch

       - name: Failure short reports
         if: ${{ failure() }}
         continue-on-error: true
-        run: cat /transformers/reports/single-gpu_examples_gpu/failures_short.txt
+        run: cat /transformers/reports/${{ matrix.machine_type }}_examples_gpu/failures_short.txt

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
-          name: single-gpu_run_examples_gpu
-          path: /transformers/reports/single-gpu_examples_gpu
+          name: ${{ matrix.machine_type }}_run_examples_gpu
+          path: /transformers/reports/${{ matrix.machine_type }}_examples_gpu

   run_pipelines_torch_gpu:
     name: PyTorch pipelines
@@ -245,6 +280,10 @@ jobs:
         working-directory: /transformers
         run: git fetch && git checkout ${{ github.sha }}

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -254,6 +293,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all pipeline tests on GPU
         working-directory: /transformers
         run: |
@@ -266,7 +309,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu
           path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu
@@ -288,6 +331,10 @@ jobs:
         run: |
           git fetch && git checkout ${{ github.sha }}

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -297,6 +344,10 @@ jobs:
         run: |
           python3 utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /transformers
+        run: pip freeze
+
       - name: Run all pipeline tests on GPU
         working-directory: /transformers
         run: |
@@ -309,7 +360,7 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu
           path: /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu
@@ -330,6 +381,10 @@ jobs:
         working-directory: /workspace/transformers
         run: git fetch && git checkout ${{ github.sha }}

+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /workspace/transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
       - name: Remove cached torch extensions
         run: rm -rf /github/home/.cache/torch_extensions/

@@ -338,7 +393,7 @@ jobs:
         working-directory: /workspace
         run: |
           python3 -m pip uninstall -y deepspeed
-          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
+          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

       - name: NVIDIA-SMI
         run: |
@@ -349,6 +404,10 @@ jobs:
         run: |
           python utils/print_env.py

+      - name: Show installed libraries and their versions
+        working-directory: /workspace/transformers
+        run: pip freeze
+
       - name: Run all tests on GPU
         working-directory: /workspace/transformers
         run: |
@@ -361,13 +420,13 @@ jobs:

       - name: Test suite reports artifacts
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.machine_type }}_run_tests_torch_cuda_extensions_gpu_test_reports
           path: /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu

-  send_results:
-    name: Send results to webhook
+  run_extract_warnings:
+    name: Extract warnings in CI artifacts
     runs-on: ubuntu-latest
     if: always()
     needs: [
@@ -381,6 +440,57 @@ jobs:
       run_pipelines_torch_gpu,
       run_all_tests_torch_cuda_extensions_gpu
     ]
+    steps:
+      - name: Checkout transformers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Install transformers
+        run: pip install transformers
+
+      - name: Show installed libraries and their versions
+        run: pip freeze
+
+      - name: Create output directory
+        run: mkdir warnings_in_ci
+
+      - uses: actions/download-artifact@v3
+        with:
+          path: warnings_in_ci
+
+      - name: Show artifacts
+        run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
+        working-directory: warnings_in_ci
+
+      - name: Extract warnings in CI artifacts
+        run: |
+          python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
+          echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"
+
+      - name: Upload artifact
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: warnings_in_ci
+          path: warnings_in_ci/selected_warnings.json
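The quoted one-liner in the `Extract warnings in CI artifacts` step above just dumps the selected warnings, one per line; expanded, it is simply:

```bash
python3 - <<'PY'
import json

with open("warnings_in_ci/selected_warnings.json") as fp:
    print("\n".join(json.load(fp)))
PY
```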

+  send_results:
+    name: Send results to webhook
+    runs-on: ubuntu-latest
+    if: always()
+    needs: [
+      check_runner_status,
+      check_runners,
+      setup,
+      run_tests_single_gpu,
+      run_tests_multi_gpu,
+      run_examples_gpu,
+      run_pipelines_tf_gpu,
+      run_pipelines_torch_gpu,
+      run_all_tests_torch_cuda_extensions_gpu,
+      run_extract_warnings
+    ]
     steps:
       - name: Preliminary job status
         shell: bash
@@ -390,8 +500,8 @@ jobs:
           echo "Runner status: ${{ needs.check_runners.result }}"
           echo "Setup status: ${{ needs.setup.result }}"

-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
+      - uses: actions/checkout@v3
+      - uses: actions/download-artifact@v3
       - name: Send message to Slack
         env:
           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
@@ -399,12 +509,25 @@ jobs:
           CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
+          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           CI_EVENT: scheduled
+          CI_SHA: ${{ github.sha }}
+          CI_WORKFLOW_REF: ${{ github.workflow_ref }}
           RUNNER_STATUS: ${{ needs.check_runner_status.result }}
           RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
           SETUP_STATUS: ${{ needs.setup.result }}
         # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
         # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
         run: |
+          sudo apt-get install -y curl
           pip install slack_sdk
+          pip show slack_sdk
           python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"

+      # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
+      - name: Failure table artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: test_failure_tables
+          path: test_failure_tables
.github/workflows/stale.yml (vendored, 2 changes)
@@ -17,7 +17,7 @@ jobs:
       - name: Setup Python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.7
+          python-version: 3.8

       - name: Install requirements
         run: |
.github/workflows/update_metdata.yml (vendored, 23 changes)
@@ -4,7 +4,7 @@ on:
   push:
     branches:
       - main
-      - update_transformers_metadata
+      - update_transformers_metadata*

 jobs:
   build_and_package:
@@ -14,27 +14,14 @@ jobs:
         shell: bash -l {0}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

-      - name: Load cached virtual environment
-        uses: actions/cache@v2
-        id: cache
-        with:
-          path: ~/venv/
-          key: v3-metadata-${{ hashFiles('setup.py') }}
-
-      - name: Create virtual environment on cache miss
-        if: steps.cache.outputs.cache-hit != 'true'
-        run: |
-          python -m venv ~/venv && . ~/venv/bin/activate
-          pip install --upgrade pip
-
       - name: Setup environment
         run: |
-          . ~/venv/bin/activate
-          pip install git+https://github.com/huggingface/transformers#egg=transformers[dev]
+          pip install --upgrade pip
+          pip install datasets pandas
+          pip install .[torch,tf,flax]

       - name: Update metadata
         run: |
-          . ~/venv/bin/activate
           python utils/update_metadata.py --token ${{ secrets.SYLVAIN_HF_TOKEN }} --commit_sha ${{ github.sha }}
.github/workflows/upload_pr_documentation.yml (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
+name: Upload PR Documentation
+
+on:
+  workflow_run:
+    workflows: ["Build PR Documentation"]
+    types:
+      - completed
+
+jobs:
+  build:
+    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
+    with:
+      package_name: transformers
+    secrets:
+      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
.gitignore (vendored, 5 changes)
@@ -163,4 +163,7 @@ tags
 *.lock

 # DS_Store (MacOS)
 .DS_Store
+
+# ruff
+.ruff_cache
CODE_OF_CONDUCT.md
@@ -7,8 +7,8 @@ We as members, contributors, and leaders pledge to make participation in our
 community a harassment-free experience for everyone, regardless of age, body
 size, visible or invisible disability, ethnicity, sex characteristics, gender
 identity and expression, level of experience, education, socio-economic status,
-nationality, personal appearance, race, religion, or sexual identity
-and orientation.
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.

 We pledge to act and interact in ways that contribute to an open, welcoming,
 diverse, inclusive, and healthy community.
@@ -23,17 +23,17 @@ community include:
 * Giving and gracefully accepting constructive feedback
 * Accepting responsibility and apologizing to those affected by our mistakes,
   and learning from the experience
-* Focusing on what is best not just for us as individuals, but for the
-  overall community
+* Focusing on what is best not just for us as individuals, but for the overall
+  community

 Examples of unacceptable behavior include:

-* The use of sexualized language or imagery, and sexual attention or
-  advances of any kind
+* The use of sexualized language or imagery, and sexual attention or advances of
+  any kind
 * Trolling, insulting or derogatory comments, and personal or political attacks
 * Public or private harassment
-* Publishing others' private information, such as a physical or email
-  address, without their explicit permission
+* Publishing others' private information, such as a physical or email address,
+  without their explicit permission
 * Other conduct which could reasonably be considered inappropriate in a
   professional setting

@@ -83,15 +83,15 @@ behavior was inappropriate. A public apology may be requested.

 ### 2. Warning

-**Community Impact**: A violation through a single incident or series
-of actions.
+**Community Impact**: A violation through a single incident or series of
+actions.

 **Consequence**: A warning with consequences for continued behavior. No
 interaction with the people involved, including unsolicited interaction with
 those enforcing the Code of Conduct, for a specified period of time. This
 includes avoiding interactions in community spaces as well as external channels
-like social media. Violating these terms may lead to a temporary or
-permanent ban.
+like social media. Violating these terms may lead to a temporary or permanent
+ban.

 ### 3. Temporary Ban

@@ -107,23 +107,27 @@ Violating these terms may lead to a permanent ban.
 ### 4. Permanent Ban

 **Community Impact**: Demonstrating a pattern of violation of community
 standards, including sustained inappropriate behavior, harassment of an
 individual, or aggression toward or disparagement of classes of individuals.

-**Consequence**: A permanent ban from any sort of public interaction within
-the community.
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.

 ## Attribution

 This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 2.0, available at
-https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

-Community Impact Guidelines were inspired by [Mozilla's code of conduct
-enforcement ladder](https://github.com/mozilla/diversity).
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

-[homepage]: https://www.contributor-covenant.org

 For answers to common questions about this code of conduct, see the FAQ at
-https://www.contributor-covenant.org/faq. Translations are available at
-https://www.contributor-covenant.org/translations.
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
412
CONTRIBUTING.md
412
CONTRIBUTING.md
@ -14,349 +14,337 @@ See the License for the specific language governing permissions and
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# How to contribute to transformers?
|
# Contribute to 🤗 Transformers
|
||||||
|
|
||||||
Everyone is welcome to contribute, and we value everybody's contribution. Code
|
Everyone is welcome to contribute, and we value everybody's contribution. Code
|
||||||
is thus not the only way to help the community. Answering questions, helping
|
contributions are not the only way to help the community. Answering questions, helping
|
||||||
others, reaching out and improving the documentations are immensely valuable to
|
others, and improving the documentation are also immensely valuable.
|
||||||
the community.
|
|
||||||
|
|
||||||
It also helps us if you spread the word: reference the library from blog posts
|
It also helps us if you spread the word! Reference the library in blog posts
|
||||||
on the awesome projects it made possible, shout out on Twitter every time it has
|
about the awesome projects it made possible, shout out on Twitter every time it has
|
||||||
helped you, or simply star the repo to say "thank you".
|
helped you, or simply ⭐️ the repository to say thank you.
|
||||||
|
|
||||||
Whichever way you choose to contribute, please be mindful to respect our
|
However you choose to contribute, please be mindful and respect our
|
||||||
[code of conduct](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md).
|
[code of conduct](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md).
|
||||||
|
|
||||||
## You can contribute in so many ways!
|
**This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md).**
|
||||||
|
|
||||||
There are 4 ways you can contribute to transformers:
|
## Ways to contribute
|
||||||
* Fixing outstanding issues with the existing code;
|
|
||||||
* Implementing new models;
|
|
||||||
* Contributing to the examples or to the documentation;
|
|
||||||
* Submitting issues related to bugs or desired new features.
|
|
||||||
|
|
||||||
In particular there is a special [Good First
|
There are several ways you can contribute to 🤗 Transformers:
|
||||||
|
|
||||||
|
* Fix outstanding issues with the existing code.
|
||||||
|
* Submit issues related to bugs or desired new features.
|
||||||
|
* Implement new models.
|
||||||
|
* Contribute to the examples or to the documentation.
|
||||||
|
|
||||||
|
If you don't know where to start, there is a special [Good First
|
||||||
Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of
|
Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of
|
||||||
open Issues that are open to anybody to work on. Just comment in the issue that you'd like to work
|
open issues that are beginner-friendly and help you start contributing to open-source. Just comment in the issue that you'd like to work
|
||||||
on it. In that same listing you will also find some Issues with `Good Second Issue` label. These are
|
on it.
|
||||||
typically slightly more complicated than the Issues with just `Good First Issue` label. But if you
|
|
||||||
feel you know what you're doing, go for it.
|
|
||||||
|
|
||||||
*All are equally valuable to the community.*
|
For something slightly more challenging, you can also take a look at the [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) list. In general though, if you feel like you know what you're doing, go for it and we'll help you get there! 🚀
|
||||||
|
|
||||||
## Submitting a new issue or feature request
|
> All contributions are equally valuable to the community. 🥰
|
||||||
|
|
||||||
Do your best to follow these guidelines when submitting an issue or a feature
|
## Fixing outstanding issues
|
||||||
|
|
||||||
|
If you notice an issue with the existing code and have a fix in mind, feel free to [start contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#create-a-pull-request) and open a Pull Request!
|
||||||
|
|
||||||
|
## Submitting a bug-related issue or feature request
|
||||||
|
|
||||||
|
Do your best to follow these guidelines when submitting a bug-related issue or a feature
|
||||||
request. It will make it easier for us to come back to you quickly and with good
|
request. It will make it easier for us to come back to you quickly and with good
|
||||||
feedback.
|
feedback.
|
||||||
|
|
||||||

### Did you find a bug?

The 🤗 Transformers library is robust and reliable thanks to users who report the problems they encounter.

Before you report an issue, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask on the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions.

Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it:

* Your **OS type and version** and **Python**, **PyTorch** and
  **TensorFlow** versions when applicable.
* A short, self-contained, code snippet that allows us to reproduce the bug in
  less than 30s (see the sketch after this list for the general shape).
* The *full* traceback if an exception is raised.
* Attach any other additional information, like screenshots, you think may help.
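
For example, a minimal, self-contained reproduction might look like the sketch below (the checkpoint and inputs are placeholders, not taken from a real bug report):

```python
# Hypothetical reproduction sketch: replace the checkpoint, inputs, and the
# failing call with the ones that actually trigger the bug you are reporting.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

inputs = tokenizer("A short example sentence.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)  # describe what you expected vs. what you got
```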

To get the OS and software versions automatically, run the following command:

```bash
transformers-cli env
```

You can also run the same command from the root of the repository:

```bash
python src/transformers/commands/transformers_cli.py env
```

### Do you want a new feature?

If there is a new feature you'd like to see in 🤗 Transformers, please open an issue and describe:

1. What is the *motivation* behind this feature? Is it related to a problem or frustration with the library? Is it a feature related to something you need for a project? Is it something you worked on and think it could benefit the community?

   Whatever it is, we'd love to hear about it!

2. Describe your requested feature in as much detail as possible. The more you can tell us about it, the better we'll be able to help you.

3. Provide a *code snippet* that demonstrates the feature's usage.

4. If the feature is related to a paper, please include a link.

If your issue is well written we're already 80% of the way there by the time you create it.

We have added [templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with your issue.

## Do you want to implement a new model?

New models are constantly released and if you want to implement a new model, please provide the following information:

* A short description of the model and link to the paper.
* Link to the implementation if it is open-sourced.
* Link to the model weights if they are available.

If you are willing to contribute the model yourself, let us know so we can help you add it to 🤗 Transformers!

We have added a [detailed guide and templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with adding a new model, and we also have a more technical guide for [how to add a model to 🤗 Transformers](https://huggingface.co/docs/transformers/add_new_model).

## Do you want to add documentation?

We're always looking for improvements to the documentation that make it more clear and accurate. Please let us know how the documentation can be improved, such as typos and any content that is missing, unclear or inaccurate. We'll be happy to make the changes or help you make a contribution if you're interested!

For more details about how to generate, build, and write the documentation, take a look at the documentation [README](https://github.com/huggingface/transformers/tree/main/docs).

## Create a Pull Request

Before writing any code, we strongly advise you to search through the existing PRs or
issues to make sure nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to contribute to
🤗 Transformers. While `git` is not the easiest tool to use, it has the greatest
manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.

You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing:

1. Fork the [repository](https://github.com/huggingface/transformers) by
   clicking on the **[Fork](https://github.com/huggingface/transformers/fork)** button on the repository's page. This creates a copy of the code
   under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

   ```bash
   git clone git@github.com:<your Github handle>/transformers.git
   cd transformers
   git remote add upstream https://github.com/huggingface/transformers.git
   ```

3. Create a new branch to hold your development changes:

   ```bash
   git checkout -b a-descriptive-name-for-my-changes
   ```

   🚨 **Do not** work on the `main` branch!

4. Set up a development environment by running the following command in a virtual environment:

   ```bash
   pip install -e ".[dev]"
   ```

   If 🤗 Transformers was already installed in the virtual environment, remove
   it with `pip uninstall transformers` before reinstalling it in editable
   mode with the `-e` flag.

   Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a
   failure with this command. If that's the case make sure to install the Deep Learning framework you are working with
   (PyTorch, TensorFlow and/or Flax) then do:

   ```bash
   pip install -e ".[quality]"
   ```

   which should be enough for most use cases.

5. Develop the features on your branch.

   As you work on your code, you should make sure the test suite
   passes. Run the tests impacted by your changes like this:

   ```bash
   pytest tests/<TEST_TO_RUN>.py
   ```

   For more information about tests, check out the
   [Testing](https://huggingface.co/docs/transformers/testing) guide.

   🤗 Transformers relies on `black` and `ruff` to format its source code
   consistently. After you make changes, apply automatic style corrections and code verifications
   that can't be automated in one go with:

   ```bash
   make fixup
   ```

   This target is also optimized to only work with files modified by the PR you're working on.

   If you prefer to run the checks one after the other, the following command applies the
   style corrections:

   ```bash
   make style
   ```

   🤗 Transformers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality
   controls are run by the CI, but you can run the same checks with:

   ```bash
   make quality
   ```

   Finally, we have a lot of scripts to make sure we didn't forget to update
   some files when adding a new model. You can run these scripts with:

   ```bash
   make repo-consistency
   ```

   To learn more about those checks and how to fix any issues with them, check out the
   [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide.

   If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check,
   make sure you install the documentation builder:

   ```bash
   pip install ".[docs]"
   ```

   Run the following command from the root of the repository:

   ```bash
   doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build
   ```

   This will build the documentation in the `~/tmp/test-build` folder where you can inspect the generated
   Markdown files with your favorite editor. You can also preview the docs on GitHub when you open a pull request.

   Once you're happy with your changes, add changed files with `git add` and
   record your changes locally with `git commit`:

   ```bash
   git add modified_file.py
   git commit
   ```

   Please remember to write [good commit
   messages](https://chris.beams.io/posts/git-commit/) to clearly communicate the changes you made!

   To keep your copy of the code up to date with the original
   repository, rebase your branch on `upstream/branch` *before* you open a pull request or if requested by a maintainer:

   ```bash
   git fetch upstream
   git rebase upstream/main
   ```

   Push your changes to your branch:

   ```bash
   git push -u origin a-descriptive-name-for-my-changes
   ```

   If you've already opened a pull request, you'll need to force push with the `--force` flag. Otherwise, if the pull request hasn't been opened yet, you can just push your changes normally.
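
   For example, a force push after a rebase looks like this (using the branch name from step 3; `git push --force` is standard `git`):

   ```bash
   git push --force origin a-descriptive-name-for-my-changes
   ```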

6. Now you can go to your fork of the repository on GitHub and click on **Pull request** to open a pull request. Make sure you tick off all the boxes in our [checklist](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review.

7. It's ok if maintainers request changes, it happens to our core contributors
   too! So everyone can see the changes in the pull request, work in your local
   branch and push the changes to your fork. They will automatically appear in
   the pull request.

### Pull request checklist

☐ The pull request title should summarize your contribution.<br>
☐ If your pull request addresses an issue, please mention the issue number in the pull
request description to make sure they are linked (and people viewing the issue know you
are working on it).<br>
☐ To indicate a work in progress please prefix the title with `[WIP]`. These are
useful to avoid duplicated work, and to differentiate it from PRs ready to be merged.<br>
☐ Make sure existing tests pass.<br>
☐ If adding a new feature, also add tests for it.<br>
   - If you are adding a new model, make sure you use
     `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)` to trigger the common tests.
   - If you are adding new `@slow` tests, make sure they pass using
     `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`.
   - If you are adding a new tokenizer, write tests and make sure
     `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py` passes.
   - CircleCI does not run the slow tests, but GitHub Actions does every night!<br>
☐ All public methods must have informative docstrings (see
[`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py)
for an example).<br>
☐ Due to the rapidly growing repository, don't add any images, videos and other
non-text files that'll significantly weigh down the repository. Instead, use a Hub
repository such as [`hf-internal-testing`](https://huggingface.co/hf-internal-testing)
to host these files and reference them by URL. We recommend placing documentation-related
images in the following repository:
[huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
You can open a PR on this dataset repository and ask a Hugging Face member to merge it.

For more information about the checks run on a pull request, take a look at our [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide.

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
the [tests](https://github.com/huggingface/transformers/tree/main/tests) folder and examples tests in the
[examples](https://github.com/huggingface/transformers/tree/main/examples) folder.

We like `pytest` and `pytest-xdist` because it's faster. From the root of the
repository, specify a *path to a subfolder or a test file* to run the test.

```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
```

Similarly, for the `examples` directory, specify a *path to a subfolder or test file* to run the test. For example, the following command tests the text classification subfolder in the PyTorch `examples` directory:

```bash
pip install -r examples/xxx/requirements.txt  # only needed the first time
python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```

In fact, this is actually how our `make test` and `make test-examples` commands are implemented (not including the `pip install`)!

You can also specify a smaller set of tests in order to test only the feature
you're working on, as shown in the sketch below.
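
For instance, a minimal sketch using pytest's standard `-k` name filter (the file path and test name below are placeholders, not real files in the repository):

```bash
pytest tests/models/my_new_model/test_my_new_model.py -k "test_forward"
```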

By default, slow tests are skipped but you can set the `RUN_SLOW` environment variable to
`yes` to run them. This will download many gigabytes of models so make sure you
have enough disk space, a good internet connection, or a lot of patience!

<Tip warning={true}>

Remember to specify a *path to a subfolder or a test file* to run the test. Otherwise, you'll run all the tests in the `tests` or `examples` folder, which will take a very long time!

</Tip>

```bash
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```

Like the slow tests, there are other environment variables available which are not enabled by default during testing:
- `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
- `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.

More environment variables and additional information can be found in the [testing_utils.py](src/transformers/testing_utils.py).
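
For example, a sketch of enabling one of these flags for a single model's tests (the model path is a placeholder, and `1` is assumed to be accepted as a truthy value like `yes`):

```bash
RUN_PT_TF_CROSS_TESTS=1 python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
```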

🤗 Transformers uses `pytest` as a test runner only. It doesn't use any
`pytest`-specific features in the test suite itself.

This means `unittest` is fully supported. Here's how to run tests with
`unittest`:

```bash
python -m unittest discover -s tests -t . -v
python -m unittest discover -s examples -t examples -v
```

### Style guide

For documentation strings, 🤗 Transformers follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html).
Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification)
for more information, and see the sketch below for the general shape of such a docstring.
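
As a minimal illustration, here is the rough shape of a Google-style docstring (the function and its arguments are invented for this example and are not part of the library):

```python
def count_parameters(model, trainable_only: bool = False) -> int:
    """Count the parameters of a model.

    Args:
        model (`torch.nn.Module`):
            The model whose parameters will be counted.
        trainable_only (`bool`, *optional*, defaults to `False`):
            Whether to count only parameters that require gradients.

    Returns:
        `int`: The number of parameters.
    """
    params = list(model.parameters())
    if trainable_only:
        params = [p for p in params if p.requires_grad]
    return sum(p.numel() for p in params)
```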

### Develop on Windows

On Windows (unless you're working in [Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/) or WSL), you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings:

```bash
git config core.autocrlf input
```

One way to run the `make` command on Windows is with MSYS2:

1. [Download MSYS2](https://www.msys2.org/), and we assume it's installed in `C:\msys64`.
2. Open the command line `C:\msys64\msys2.exe` (it should be available from the **Start** menu).
3. Run in the shell: `pacman -Syu` and install `make` with `pacman -S make`.
4. Add `C:\msys64\usr\bin` to your PATH environment variable.

You can now use `make` from any terminal (PowerShell, cmd.exe, etc.)! 🎉

### Sync a forked repository with upstream main (the Hugging Face repository)

When updating the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository which adds reference notes to each upstream PR, and sends unnecessary notifications to the developers involved in these PRs.

1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main.
2. If a PR is absolutely necessary, use the following steps after checking out your branch:

   ```bash
   git checkout -b your-branch-for-syncing
   git pull --squash --no-commit upstream main
   git commit -m '<your message without GitHub references>'
   git push --set-upstream origin your-branch-for-syncing
   ```

@@ -18,7 +18,7 @@ limitations under the License.

This is an Open Source Project so please be mindful that like in any other project of this kind there is no obligation to answer all requests for help.

However, we want to encourage you to ask for help whenever you think it's needed! We are happy about every question we get because it allows us to better understand your needs, possible misunderstandings, and most importantly a way for you to help us make this library better. That being said, this document's main purpose is to provide guidelines on how you can formulate your requests to increase your chances of being understood and getting support.

There are two main venues to receive support: [the forums](https://discuss.huggingface.co/) and [the GitHub issues](https://github.com/huggingface/transformers/issues).

@@ -158,7 +158,7 @@ You are not required to read the following guidelines before opening an issue. H…

```bash
--do_train --n_train 500 --num_train_epochs 1 \
--per_device_train_batch_size 1 --freeze_embeds \
--src_lang en_XX --tgt_lang ro_RO --task translation \
--fp16
```

If you don't break it up, one has to scroll horizontally which often makes it quite difficult to quickly see what's happening.

@@ -1 +0,0 @@
include LICENSE

Makefile

@@ -9,9 +9,8 @@ modified_only_fixup:

```make
	$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
	@if test -n "$(modified_py_files)"; then \
		echo "Checking/fixing $(modified_py_files)"; \
		black $(modified_py_files); \
		ruff $(modified_py_files) --fix; \
	else \
		echo "No library .py files were modified"; \
	fi
```

@@ -40,17 +39,18 @@ repo-consistency:

```make
	python utils/check_repo.py
	python utils/check_inits.py
	python utils/check_config_docstrings.py
	python utils/check_config_attributes.py
	python utils/check_doctest_list.py
	python utils/update_metadata.py --check-only
	python utils/check_task_guides.py

# this target runs checks on all files

quality:
	black --check $(check_dirs) setup.py conftest.py
	python utils/custom_init_isort.py --check_only
	python utils/sort_auto_mappings.py --check_only
	ruff $(check_dirs) setup.py conftest.py
	doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
	python utils/check_doc_toc.py
```

@@ -65,8 +65,8 @@ extra_style_checks:

```make
# this target runs checks on all files and potentially modifies some of them

style:
	black $(check_dirs) setup.py conftest.py
	ruff $(check_dirs) setup.py conftest.py --fix
	${MAKE} autogenerate_code
	${MAKE} extra_style_checks
```

@@ -80,6 +80,7 @@ fix-copies:

```make
	python utils/check_copies.py --fix_and_overwrite
	python utils/check_table.py --fix_and_overwrite
	python utils/check_dummies.py --fix_and_overwrite
	python utils/check_task_guides.py --fix_and_overwrite

# Run tests for the library
```

@@ -110,3 +111,10 @@ post-release:

```make
post-patch:
	python utils/release.py --post_release --patch

build-release:
	rm -rf dist
	rm -rf build
	python setup.py bdist_wheel
	python setup.py sdist
	python utils/check_build.py
```

README.md

@@ -15,10 +15,15 @@ limitations under the License.
-->

<p align="center">
    <picture>
        <source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-dark.svg">
        <source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-light.svg">
        <img alt="Hugging Face Transformers Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-light.svg" width="352" height="59" style="max-width: 100%;">
    </picture>
    <br/>
    <br/>
</p>

<p align="center">
    <a href="https://circleci.com/gh/huggingface/transformers">
        <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">

@@ -44,7 +49,9 @@ limitations under the License.
        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a>
    <p>
</h4>

@@ -89,16 +96,35 @@ In Computer Vision:
- [Image classification with ViT](https://huggingface.co/google/vit-base-patch16-224)
- [Object Detection with DETR](https://huggingface.co/facebook/detr-resnet-50)
- [Semantic Segmentation with SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
- [Panoptic Segmentation with MaskFormer](https://huggingface.co/facebook/maskformer-swin-small-coco)
- [Depth Estimation with DPT](https://huggingface.co/docs/transformers/model_doc/dpt)
- [Video Classification with VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)
- [Universal Segmentation with OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_dinat_large)

In Audio:
- [Automatic Speech Recognition with Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h)
- [Keyword Spotting with Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks)
- [Audio Classification with Audio Spectrogram Transformer](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)

In Multimodal tasks:
- [Table Question Answering with TAPAS](https://huggingface.co/google/tapas-base-finetuned-wtq)
- [Visual Question Answering with ViLT](https://huggingface.co/dandelin/vilt-b32-finetuned-vqa)
- [Zero-shot Image Classification with CLIP](https://huggingface.co/openai/clip-vit-large-patch14)
- [Document Question Answering with LayoutLM](https://huggingface.co/impira/layoutlm-document-qa)
- [Zero-shot Video Classification with X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)

## 100 projects using Transformers

Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the
Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone
else to build their dream projects.

In order to celebrate the 100,000 stars of transformers, we have decided to put the spotlight on the
community, and we have created the [awesome-transformers](./awesome-transformers.md) page which lists 100
incredible projects built in the vicinity of transformers.

If you own or use a project that you believe should be part of the list, please open a PR to add it!

## If you are looking for custom support from the Hugging Face team

@@ -153,7 +179,7 @@ Many tasks have a pre-trained `pipeline` ready to go, in NLP but also in compute…

```
   'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}]
```

Here we get a list of objects detected in the image, with a box surrounding the object and a confidence score. Here is the original image on the left, with the predictions displayed on the right:

<h3 align="center">
    <a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png" width="400"></a>

@@ -221,7 +247,7 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta…

### With pip

This repository is tested on Python 3.7+, Flax 0.4.1+, PyTorch 1.9+ and TensorFlow 2.4+.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
@ -254,13 +280,18 @@ Follow the installation pages of Flax, PyTorch or TensorFlow to see how to insta
|
|||||||
|
|
||||||
## Model architectures
|
## Model architectures
|
||||||
|
|
||||||
**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).
|
**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co/models) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).
|
||||||
|
|
||||||
Current number of checkpoints: 
|
Current number of checkpoints: 
|
||||||
|
|
||||||
🤗 Transformers currently provides the following architectures (see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each them):
|
🤗 Transformers currently provides the following architectures (see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each them):
|
||||||
|
|
||||||
1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
|
1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
|
||||||
|
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
|
||||||
|
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
|
||||||
|
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
|
||||||
|
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
|
||||||
|
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
|
||||||
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
||||||
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
|
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
|
||||||
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||||
@ -270,19 +301,29 @@ Current number of checkpoints: ** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
||||||
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||||
|
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
|
||||||
|
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
|
||||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||||
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/).
|
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
|
||||||
|
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
|
||||||
|
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
|
||||||
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
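
   As a minimal sketch (an illustration, not part of the original entry), CLIP's zero-shot image classification can be exercised via `pipeline`; the checkpoint below is one public CLIP model:

   ```python
   # Zero-shot image classification: score free-form text labels against an image.
   from transformers import pipeline

   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
   result = classifier(
       "http://images.cocodataset.org/val2017/000000039769.jpg",  # sample COCO image
       candidate_labels=["a photo of two cats", "a photo of a dog"],
   )
   print(result)  # each label with its similarity-based score
   ```
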
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (from Google AI) released with the paper [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) by Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun.
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi.
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
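
   As a minimal sketch (illustrative only), a published DistilBERT checkpoint fine-tuned on SST-2 can be used for sentiment analysis in a few lines:

   ```python
   # Sentiment analysis with a public DistilBERT checkpoint.
   from transformers import pipeline

   sentiment = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
   print(sentiment("This model is small, fast, and easy to use."))
   # -> [{'label': 'POSITIVE', 'score': ...}]
   ```
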
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
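
   As a minimal sketch (illustrative, using one public DPR checkpoint), a question can be embedded for dense retrieval as follows:

   ```python
   # Embed a question with a public DPR question encoder.
   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer

   name = "facebook/dpr-question_encoder-single-nq-base"
   tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(name)
   model = DPRQuestionEncoder.from_pretrained(name)
   inputs = tokenizer("Who wrote the DPR paper?", return_tensors="pt")
   embedding = model(**inputs).pooler_output  # dense vector of shape (1, 768)
   ```
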
1. **[DPT](https://huggingface.co/docs/transformers/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (from Meta AI) released with the paper [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) by Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi.
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (from Baidu) released with the paper [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) by Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2 and ESMFold** were released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) released by Ebtesam Almazrouei, Hamza Alobeidli, Abdulaziz Alshamsi, Alessandro Cappelli, Ruxandra Cojocaru, Merouane Debbah, Etienne Goffinet, Daniel Heslow, Julien Launay, Quentin Malartic, Badreddine Noune, Baptiste Pannier, Guilherme Penedo.
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei.
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei.
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
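
   As a minimal sketch (illustrative only), the public `gpt2` checkpoint can be sampled with the text-generation pipeline:

   ```python
   # Open-ended text generation with the public gpt2 checkpoint.
   from transformers import pipeline, set_seed

   generator = pipeline("text-generation", model="gpt2")
   set_seed(0)  # make the sample reproducible
   print(generator("The Transformers library", max_new_tokens=20)[0]["generated_text"])
   ```
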
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto (tanreinama).
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
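
   As a minimal sketch (illustrative; the checkpoint below is one of the many published OPUS-MT models), translation works through the same `pipeline` API:

   ```python
   # English -> German translation with a public OPUS-MT Marian checkpoint.
   from transformers import pipeline

   translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-de")
   print(translator("Machine translation models trained on OPUS data.")[0]["translation_text"])
   ```
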
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (from Meta/USC/CMU/SJTU) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
||||||
|
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
|
||||||
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
|
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
|
||||||
|
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
|
||||||
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
|
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
|
||||||
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
|
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
|
||||||
|
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
|
||||||
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
||||||
|
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
|
||||||
|
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
|
||||||
1. **[OPT](https://huggingface.co/docs/transformers/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
@@ -363,45 +440,62 @@ Current number of checkpoints:
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by Hui Su, Weiwei Shi, Xiaoyu Shen, Xiao Zhou, Tuo Ji, Jiarui Fang, Jie Zhou.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng), released on [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (from Meta AI) released with the paper [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (from Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
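Every architecture listed above is loaded through the same interface. The following is a minimal sketch using the Auto classes, assuming `transformers` and a PyTorch backend are installed; `roberta-base` is just an illustrative checkpoint, and any model above can be substituted by name:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# The Auto classes resolve the concrete architecture from the checkpoint's
# config, so every model in the list above loads the same way.
tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoModelForSequenceClassification.from_pretrained("roberta-base")

inputs = tokenizer("Hello world!", return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)  # (batch_size, num_labels)
```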
@@ -424,7 +518,6 @@ These implementations have been tested on several datasets (see the example scripts)
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and the `Trainer` API (a minimal sketch follows this table) |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrate to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |
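As flagged in the training row above, the `Trainer` API wraps the full training loop. A minimal sketch, assuming a pre-tokenized `train_dataset` (with `input_ids`, `attention_mask`, and `labels` columns) already exists; it is not defined here:

```python
from transformers import (
    AutoModelForSequenceClassification,
    Trainer,
    TrainingArguments,
)

# train_dataset is assumed to be a pre-tokenized datasets.Dataset.
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-cased", num_labels=2
)

args = TrainingArguments(
    output_dir="out",               # where checkpoints are written
    per_device_train_batch_size=8,
    num_train_epochs=1,
)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()
```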
## Citation
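The body of this section is collapsed in the compare view; for reference, the entry the README cites is the Transformers system-demonstration paper (EMNLP 2020):

```bibtex
@inproceedings{wolf-etal-2020-transformers,
    title = "Transformers: State-of-the-Art Natural Language Processing",
    author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and R{\'e}mi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = oct,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
    pages = "38--45"
}
```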
README_es.md
@@ -44,7 +44,9 @@ limitations under the License.
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
<b>Español</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a>
</p>
</h4>
@@ -56,13 +58,13 @@ limitations under the License.
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>
🤗 Transformers provides thousands of pretrained models to perform tasks across different modalities such as text, vision, and audio.

These models can be applied to:

* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, and text generation, in over 100 languages.
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
* 🗣️ Audio, for tasks like speech recognition and audio classification.

Transformer models can also perform tasks on **several modalities combined**, such as question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.
@@ -90,6 +92,7 @@ In computer vision:
- [Object detection with DETR](https://huggingface.co/facebook/detr-resnet-50)
- [Semantic segmentation with SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
- [Panoptic segmentation with DETR](https://huggingface.co/facebook/detr-resnet-50-panoptic)
- [Universal segmentation with OneFormer (semantic, instance, and panoptic segmentation with a single model)](https://huggingface.co/shi-labs/oneformer_ade20k_dinat_large)
In audio:
- [Automatic speech recognition with Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h)
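The demo links above all run through the same high-level `pipeline` API. A minimal sketch, assuming `transformers`, `torch`, and the audio extras are installed; `sample.flac` and `street_scene.jpg` are hypothetical local files:

```python
from transformers import pipeline

# Automatic speech recognition with the Wav2Vec2 checkpoint linked above.
asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
print(asr("sample.flac")["text"])

# The same API covers the vision demos, e.g. object detection with DETR.
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
print(detector("street_scene.jpg"))
```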
@@ -261,6 +264,11 @@ Current number of checkpoints:

🤗 Transformers currently provides the following architectures (see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each of them):
1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
@@ -270,19 +278,29 @@ Current number of checkpoints:
|
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by the [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (from Google AI) released with the paper [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) by Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun.
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi.
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (from Meta AI) released with the paper [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) by Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi.
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (from Baidu) released with the paper [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) by Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) released by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei.
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei.
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto (tanreinama).
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (from Facebook) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released in [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (from Meta AI) released with the paper [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
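Every architecture in the list above can be loaded by checkpoint name through the Auto classes. As a minimal sketch, assuming PyTorch is installed (the checkpoint name and input text are only illustrative):

```python
from transformers import AutoTokenizer, AutoModel

# Any checkpoint from the list above can be loaded by name with the Auto classes;
# the right architecture is picked from the checkpoint's config automatically.
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModel.from_pretrained("xlm-roberta-base")

inputs = tokenizer("Hello world!", return_tensors="pt")
outputs = model(**inputs)
```
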
README_hd.md (new file, 487 lines)

<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<!---
A useful guide for English-Hindi translation of Hugging Face documentation

- Add space around English words and numbers when they appear between Hindi characters. E.g., कुल मिलाकर 100 से अधिक भाषाएँ; ट्रांसफॉर्मर लाइब्रेरी का उपयोग करता है।
- Use square quotation marks, e.g., "उद्धरण"

Dictionary

Hugging Face: गले लगाओ चेहरा
token: शब्द (mark the original English in parentheses)
tokenize: टोकननाइज़ करें (use parentheses to mark the original English)
tokenizer: Tokenizer (with the original English in parentheses)
transformer: transformer
pipeline: समनुक्रम
API: API (without translation)
inference: विचार
Trainer: प्रशिक्षक (not translated when it appears as a class name)
pretrained/pretrain: पूर्व प्रशिक्षण
finetune: फ़ाइन ट्यूनिंग
community: समुदाय
example: translated as "केस केस" when it refers specifically to the repository's example directory
Python data structures (e.g., list, set, dict): translate as सूची, सेट, शब्दकोश, marking the original English in parentheses
NLP/Natural Language Processing: NLP is kept untranslated; Natural Language Processing is translated as प्राकृतिक भाषा संसाधन
checkpoint: जाँच बिंदु
-->

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
    <br>
</p>
<p align="center">
    <a href="https://circleci.com/gh/huggingface/transformers">
        <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
    </a>
    <a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
        <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
    </a>
    <a href="https://huggingface.co/docs/transformers/index">
        <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
    </a>
    <a href="https://github.com/huggingface/transformers/releases">
        <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
    </a>
    <a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
        <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
    </a>
    <a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
</p>

<h4 align="center">
    <p>
        <a href="https://github.com/huggingface/transformers/">English</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
        <b>हिन्दी</b> |
    </p>
</h4>

<h3 align="center">
    <p>State-of-the-art Machine Learning for Jax, PyTorch and TensorFlow</p>
</h3>

<h3 align="center">
    <a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>

🤗 Transformers 100 से अधिक भाषाओं में पाठ वर्गीकरण, सूचना निष्कर्षण, प्रश्न उत्तर, सारांशीकरण, अनुवाद, पाठ निर्माण का समर्थन करने के लिए हजारों पूर्व-प्रशिक्षित मॉडल प्रदान करता है। इसका उद्देश्य सबसे उन्नत एनएलपी तकनीक को सभी के लिए सुलभ बनाना है।

🤗 Transformers त्वरित डाउनलोड और उपयोग के लिए एक एपीआई प्रदान करता है, जिससे आप किसी दिए गए पाठ पर एक पूर्व-प्रशिक्षित मॉडल ले सकते हैं, इसे अपने डेटासेट पर फ़ाइन ट्यून कर सकते हैं और इसे [मॉडल हब](https://huggingface.co/models) के माध्यम से समुदाय के साथ साझा कर सकते हैं। साथ ही, प्रत्येक परिभाषित पायथन मॉड्यूल पूरी तरह से स्वतंत्र है, जो संशोधन और तेज़ अनुसंधान प्रयोगों के लिए सुविधाजनक है।

🤗 Transformers तीन सबसे लोकप्रिय गहन शिक्षण पुस्तकालयों का समर्थन करता है: [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) और [TensorFlow](https://www.tensorflow.org/), और इनके साथ निर्बाध रूप से एकीकृत होता है। आप अपने मॉडल को एक ढांचे के साथ प्रशिक्षित कर सकते हैं और दूसरे के साथ लोड करके अनुमान लगा सकते हैं।

## ऑनलाइन डेमो

आप [model hub](https://huggingface.co/models) के मॉडल पृष्ठों पर अधिकांश मॉडलों का सीधे परीक्षण कर सकते हैं। हम [निजी मॉडल होस्टिंग, मॉडल संस्करण और अनुमान एपीआई](https://huggingface.co/pricing) भी प्रदान करते हैं।

यहाँ कुछ उदाहरण हैं:
- [शब्द को भरने के लिए मास्क के रूप में BERT का प्रयोग करें](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
- [इलेक्ट्रा के साथ नामित इकाई पहचान](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
- [जीपीटी-2 के साथ टेक्स्ट जनरेशन](https://huggingface.co/gpt2?text=A+long+time+ago%2C+)
- [रॉबर्टा के साथ प्राकृतिक भाषा निष्कर्ष](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [बार्ट के साथ पाठ सारांश](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
- [डिस्टिलबर्ट के साथ प्रश्नोत्तर](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
- [अनुवाद के लिए T5 का प्रयोग करें](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)

**[Write With Transformer](https://transformer.huggingface.co)**, हगिंग फेस टीम द्वारा बनाया गया, आधिकारिक टेक्स्ट जनरेशन डेमो है।

## यदि आप हगिंग फेस टीम से बीस्पोक समर्थन की तलाश कर रहे हैं

<a target="_blank" href="https://huggingface.co/support">
    <img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/front/thumbnails/support.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
</a><br>

## जल्दी शुरू करें

हम त्वरित उपयोग के लिए `pipeline` (पाइपलाइन) एपीआई प्रदान करते हैं। पाइपलाइन पूर्व-प्रशिक्षित मॉडल और संबंधित पाठ प्रीप्रोसेसिंग को एक साथ जोड़ती है। सकारात्मक और नकारात्मक भावना निर्धारित करने के लिए पाइपलाइन के उपयोग का एक त्वरित उदाहरण यहाँ दिया गया है:

```python
>>> from transformers import pipeline

# भावना विश्लेषण पाइपलाइन का उपयोग करना
>>> classifier = pipeline('sentiment-analysis')
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
```

कोड की दूसरी पंक्ति पाइपलाइन द्वारा उपयोग किए गए पूर्व-प्रशिक्षित मॉडल को डाउनलोड और कैश करती है, जबकि तीसरी पंक्ति दिए गए पाठ पर मूल्यांकन करती है। यहाँ उत्तर 99.97% आत्मविश्वास के स्तर के साथ "सकारात्मक" है।
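
पाइपलाइन डिफ़ॉल्ट चेकपॉइंट के बजाय हब का कोई विशिष्ट मॉडल भी उपयोग कर सकती है। नीचे एक छोटा स्केच है; `model` आर्ग्युमेंट में दिया गया नाम केवल उदाहरण के लिए चुना गया चेकपॉइंट है (यह इस कार्य का सामान्य डिफ़ॉल्ट मॉडल भी है):

```python
>>> from transformers import pipeline

# `model` आर्ग्युमेंट से हब का कोई विशिष्ट चेकपॉइंट चुनें
>>> classifier = pipeline('sentiment-analysis', model='distilbert-base-uncased-finetuned-sst-2-english')
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
```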

कई NLP कार्यों के लिए पूर्व-प्रशिक्षित पाइपलाइनें पहले से तैयार (आउट-ऑफ़-द-बॉक्स) उपलब्ध हैं। उदाहरण के लिए, हम किसी दिए गए पाठ से किसी प्रश्न का उत्तर आसानी से निकाल सकते हैं:

```python
>>> from transformers import pipeline

# प्रश्नोत्तर पाइपलाइन का उपयोग करना
>>> question_answerer = pipeline('question-answering')
>>> question_answerer({
...     'question': 'What is the name of the repository ?',
...     'context': 'Pipeline has been included in the huggingface/transformers repository'
... })
{'score': 0.30970096588134766, 'start': 34, 'end': 58, 'answer': 'huggingface/transformers'}

```

उत्तर के अलावा, यहाँ इस्तेमाल हुआ पूर्व-प्रशिक्षित मॉडल संबंधित आत्मविश्वास स्कोर तथा टोकनयुक्त पाठ में उत्तर की शुरुआत (start) और समाप्ति (end) की स्थिति भी देता है। आप [इस ट्यूटोरियल](https://huggingface.co/docs/transformers/task_summary) से पाइपलाइन एपीआई द्वारा समर्थित कार्यों के बारे में अधिक जान सकते हैं।
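
यही एपीआई अन्य कार्यों पर भी इसी तरह लागू होती है। नीचे सारांशीकरण का एक छोटा स्केच है (इनपुट पाठ केवल उदाहरण है; आउटपुट चुने गए डिफ़ॉल्ट मॉडल पर निर्भर करेगा):

```python
>>> from transformers import pipeline

# सारांशीकरण पाइपलाइन: पहली बार चलाने पर डिफ़ॉल्ट मॉडल डाउनलोड होता है
>>> summarizer = pipeline('summarization')
>>> summary = summarizer(
...     "The tower is 324 metres tall, about the same height as an 81-storey building, "
...     "and the tallest structure in Paris.",
...     max_length=20,
...     min_length=5,
... )
```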
अपने कार्य पर किसी भी पूर्व-प्रशिक्षित मॉडल को डाउनलोड करना और उसका उपयोग करना भी कोड की तीन पंक्तियों की तरह सरल है। यहाँ PyTorch संस्करण के लिए एक उदाहरण दिया गया है:
```python
>>> from transformers import AutoTokenizer, AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = AutoModel.from_pretrained("bert-base-uncased")

>>> inputs = tokenizer("Hello world!", return_tensors="pt")
>>> outputs = model(**inputs)
```
यहाँ समकक्ष TensorFlow कोड है:
```python
>>> from transformers import AutoTokenizer, TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = TFAutoModel.from_pretrained("bert-base-uncased")

>>> inputs = tokenizer("Hello world!", return_tensors="tf")
>>> outputs = model(**inputs)
```

टोकननाइज़र सभी पूर्व-प्रशिक्षित मॉडलों के लिए प्रीप्रोसेसिंग प्रदान करता है और इसे सीधे एक स्ट्रिंग (जैसे ऊपर दिए गए उदाहरण में) या किसी सूची पर बुलाया जा सकता है। यह एक डिक्शनरी (dict) आउटपुट करता है जिसे आप डाउनस्ट्रीम कोड में उपयोग कर सकते हैं या `**` अनपैकिंग एक्सप्रेशन के माध्यम से सीधे मॉडल को पास कर सकते हैं।
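
टोकननाइज़र का आउटपुट देखने के लिए नीचे एक छोटा स्केच है (कुंजियाँ मॉडल के अनुसार बदल सकती हैं; यहाँ BERT के लिए अपेक्षित कुंजियाँ दिखाई गई हैं):

```python
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("Hello world!", return_tensors="pt")
# आउटपुट एक डिक्शनरी (dict) जैसा ऑब्जेक्ट है; इसकी कुंजियाँ मॉडल के फॉरवर्ड पास के आर्ग्युमेंट हैं
>>> sorted(inputs.keys())
['attention_mask', 'input_ids', 'token_type_ids']
```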

मॉडल स्वयं एक नियमित [PyTorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) या [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) है (आपके बैकएंड के आधार पर), जिसे सामान्य तरीके से उपयोग किया जा सकता है। [यह ट्यूटोरियल](https://huggingface.co/transformers/training.html) बताता है कि इस तरह के मॉडल को क्लासिक PyTorch या TensorFlow प्रशिक्षण लूप में कैसे एकीकृत किया जाए, या हमारे `Trainer` एपीआई का उपयोग करके इसे किसी नए डेटासेट पर जल्दी से फ़ाइन ट्यून कैसे किया जाए।
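
`Trainer` एपीआई से फ़ाइन ट्यूनिंग का एक न्यूनतम स्केच नीचे दिया गया है; यहाँ इस्तेमाल हुआ खिलौना डेटासेट और `my_model` आउटपुट निर्देशिका केवल उदाहरण के लिए धारणाएँ हैं:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

# एक छोटा खिलौना डेटासेट (केवल प्रदर्शन के लिए)
texts = ["I love this!", "I hate this!"]
labels = [1, 0]

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encodings = tokenizer(texts, truncation=True, padding=True)

class ToyDataset(torch.utils.data.Dataset):
    def __len__(self):
        return len(labels)
    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in encodings.items()}
        item["labels"] = torch.tensor(labels[idx])
        return item

model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
training_args = TrainingArguments(output_dir="my_model", num_train_epochs=1)

# क्लासिक प्रशिक्षण लूप लिखे बिना फ़ाइन ट्यूनिंग
trainer = Trainer(model=model, args=training_args, train_dataset=ToyDataset())
trainer.train()
```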
## ट्रांसफार्मर का उपयोग क्यों करें?

1. उपयोग में आसानी के लिए उन्नत मॉडल:
    - एनएलयू और एनएलजी पर बेहतर प्रदर्शन
    - कम प्रवेश-बाधा के साथ शिक्षण और अभ्यास के अनुकूल
    - उपयोगकर्ता-केंद्रित एब्स्ट्रैक्शन: केवल तीन क्लासेस जानने की ज़रूरत
    - सभी मॉडलों के लिए एकीकृत एपीआई

1. कम कम्प्यूटेशनल ओवरहेड और कम कार्बन उत्सर्जन:
    - शोधकर्ता हर बार नए सिरे से प्रशिक्षण देने के बजाय प्रशिक्षित मॉडल साझा कर सकते हैं
    - इंजीनियर गणना समय और उत्पादन ओवरहेड को कम कर सकते हैं
    - दर्जनों मॉडल आर्किटेक्चर, 2,000 से अधिक पूर्व-प्रशिक्षित मॉडल, 100 से अधिक भाषाओं का समर्थन

1. मॉडल जीवनचक्र के हर हिस्से को शामिल करता है:
    - कोड की केवल 3 पंक्तियों में उन्नत मॉडलों को प्रशिक्षित करें
    - मॉडल को विभिन्न डीप लर्निंग फ्रेमवर्क के बीच इच्छानुसार स्थानांतरित किया जा सकता है
    - प्रशिक्षण, मूल्यांकन और उत्पादन के लिए सबसे उपयुक्त ढांचा निर्बाध रूप से चुनें

1. अपनी आवश्यकताओं के अनुसार मॉडल और उपयोग-मामलों को आसानी से अनुकूलित करें:
    - हम मूल पेपर के परिणामों को पुन: पेश करने के लिए प्रत्येक मॉडल आर्किटेक्चर के कई उपयोग के मामले प्रदान करते हैं
    - मॉडल की आंतरिक संरचना पारदर्शी और सुसंगत रहती है
    - मॉडल फ़ाइल को अलग से इस्तेमाल किया जा सकता है, जो संशोधन और त्वरित प्रयोग के लिए सुविधाजनक है

## मुझे ट्रांसफॉर्मर का उपयोग कब नहीं करना चाहिए?

- यह लाइब्रेरी मॉड्यूलर न्यूरल नेटवर्क टूलबॉक्स नहीं है। मॉडल फ़ाइलों का कोड जानबूझकर सरल रखा गया है, बिना अतिरिक्त एब्स्ट्रैक्शन के, ताकि शोधकर्ता एब्स्ट्रैक्शन की परतों और फ़ाइल-दर-फ़ाइल भटकाव में उलझे बिना जल्दी से पुनरावृत्ति कर सकें।
- `Trainer` एपीआई हर मॉडल के साथ संगत नहीं है; यह केवल इस लाइब्रेरी के मॉडलों के लिए अनुकूलित है। यदि आप सामान्य मशीन लर्निंग के लिए उपयुक्त प्रशिक्षण लूप कार्यान्वयन की तलाश में हैं, तो कृपया कहीं और देखें।
- हमारे सर्वोत्तम प्रयासों के बावजूद, [उदाहरण निर्देशिका](https://github.com/huggingface/transformers/tree/main/examples) की स्क्रिप्ट केवल उपयोग के उदाहरण हैं। आपकी विशिष्ट समस्या पर ज़रूरी नहीं कि वे बिना बदलाव के काम करें; आपको अपनी ज़रूरत के अनुसार कोड की कुछ पंक्तियाँ बदलनी पड़ सकती हैं।

## स्थापित करना

### पिप का उपयोग करना

इस रिपॉजिटरी का परीक्षण Python 3.6+, Flax 0.3.2+, PyTorch 1.3.1+ और TensorFlow 2.3+ के तहत किया गया है।

आप 🤗 ट्रांसफॉर्मर को एक [वर्चुअल एनवायरनमेंट](https://docs.python.org/3/library/venv.html) में इंस्टॉल कर सकते हैं। यदि आप अभी तक पायथन के वर्चुअल एनवायरनमेंट से परिचित नहीं हैं, तो कृपया [उपयोगकर्ता निर्देश](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/) पढ़ें।

सबसे पहले, पायथन के उस संस्करण के साथ एक वर्चुअल एनवायरनमेंट बनाएं जिसका आप उपयोग करने की योजना बना रहे हैं, और उसे सक्रिय करें।

फिर, आपको Flax, PyTorch या TensorFlow में से किसी एक को स्थापित करने की आवश्यकता है। अपने प्लेटफ़ॉर्म पर इन फ़्रेमवर्क को स्थापित करने के लिए [TensorFlow स्थापना पृष्ठ](https://www.tensorflow.org/install/), [PyTorch स्थापना पृष्ठ](https://pytorch.org/get-started/locally/#start-locally) या [Flax स्थापना पृष्ठ](https://github.com/google/flax#quick-install) देखें।

जब इनमें से कोई एक बैकएंड सफलतापूर्वक स्थापित हो जाए, तो 🤗 ट्रांसफॉर्मर निम्नानुसार स्थापित किया जा सकता है:

```bash
pip install transformers
```

यदि आप उपयोग के उदाहरण आज़माना चाहते हैं या आधिकारिक रिलीज़ से पहले नवीनतम इन-डेवलपमेंट कोड का उपयोग करना चाहते हैं, तो आपको [स्रोत से इंस्टॉल](https://huggingface.co/docs/transformers/installation#installing-from-source) करना होगा।
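
स्रोत से नवीनतम डेवलपमेंट संस्करण स्थापित करने का एक सामान्य तरीका (विवरण के लिए ऊपर दिया गया इंस्टॉलेशन पृष्ठ देखें):

```bash
pip install git+https://github.com/huggingface/transformers
```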

### कोंडा का उपयोग करना

ट्रांसफॉर्मर संस्करण 4.0.0 के बाद से हमारे पास एक कोंडा चैनल है: `huggingface`।

ट्रांसफॉर्मर कोंडा के माध्यम से निम्नानुसार स्थापित किया जा सकता है:

```bash
conda install -c huggingface transformers
```
कोंडा के माध्यम से Flax, PyTorch, या TensorFlow में से किसी एक को स्थापित करने के लिए, निर्देशों के लिए उनके संबंधित स्थापना पृष्ठ देखें।
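
स्थापना की त्वरित जाँच के लिए नीचे जैसी एक-पंक्ति कमांड चलाई जा सकती है (पहली बार चलाने पर डिफ़ॉल्ट मॉडल डाउनलोड होगा):

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"
```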

## मॉडल आर्किटेक्चर

🤗 ट्रांसफॉर्मर द्वारा समर्थित [**सभी मॉडल जाँच बिंदु (checkpoints)**](https://huggingface.co/models), जिन्हें [उपयोगकर्ताओं](https://huggingface.co/users) और [संगठनों](https://huggingface.co/organizations) द्वारा अपलोड किया जाता है, बिना किसी बाधा के huggingface.co [मॉडल हब](https://huggingface.co) के साथ एकीकृत हैं।

जाँच बिंदुओं (checkpoints) की वर्तमान संख्या: 

🤗 ट्रांसफॉर्मर वर्तमान में निम्नलिखित आर्किटेक्चर का समर्थन करता है (प्रत्येक मॉडल के अवलोकन के लिए [यहाँ](https://huggingface.co/docs/transformers/model_summary) देखें):

1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (Google Research and the Toyota Technological Institute at Chicago से) साथ में पेपर [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), झेंझोंग लैन, मिंगदा चेन, सेबेस्टियन गुडमैन, केविन गिम्पेल, पीयूष शर्मा, राडू सोरिकट द्वारा।
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (Google Research से) Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig द्वारा अनुसंधान पत्र [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) के साथ जारी किया गया
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (फेसबुक से) साथ में पेपर [बार्ट: प्राकृतिक भाषा निर्माण, अनुवाद और समझ के लिए अनुक्रम-से-अनुक्रम पूर्व प्रशिक्षण](https://arxiv.org/pdf/1910.13461.pdf) माइक लुईस, यिनहान लियू, नमन गोयल, मार्जन ग़ज़विनिनेजाद, अब्देलरहमान मोहम्मद, ओमर लेवी, वेस स्टोयानोव और ल्यूक ज़ेटलमॉयर द्वारा।
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (École polytechnique से) साथ में पेपर [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis द्वारा जारी।
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (VinAI Research से) साथ में पेपर [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701)गुयेन लुओंग ट्रान, डुओंग मिन्ह ले और डाट क्वोक गुयेन द्वारा पोस्ट किया गया।
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (Microsoft से) साथ में कागज [BEiT: BERT इमेज ट्रांसफॉर्मर्स का प्री-ट्रेनिंग](https://arxiv.org/abs/2106.08254) Hangbo Bao, Li Dong, Furu Wei द्वारा।
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (गूगल से) साथ वाला पेपर [बीईआरटी: प्री-ट्रेनिंग ऑफ डीप बिडायरेक्शनल ट्रांसफॉर्मर्स फॉर लैंग्वेज अंडरस्टैंडिंग](https://arxiv.org/abs/1810.04805) जैकब डेवलिन, मिंग-वेई चांग, केंटन ली और क्रिस्टीना टौटानोवा द्वारा प्रकाशित किया गया था।
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (गूगल से) साथ देने वाला पेपर [सीक्वेंस जेनरेशन टास्क के लिए प्री-ट्रेंड चेकपॉइंट का इस्तेमाल करना](https://arxiv.org/abs/1907.12461) साशा रोठे, शशि नारायण, अलियाक्सि सेवेरिन द्वारा।
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (VinAI Research से) साथ में पेपर [BERTweet: अंग्रेजी ट्वीट्स के लिए एक पूर्व-प्रशिक्षित भाषा मॉडल](https://aclanthology.org/2020.emnlp-demos.2/) डाट क्वोक गुयेन, थान वु और अन्ह तुआन गुयेन द्वारा प्रकाशित।
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (गूगल रिसर्च से) साथ वाला पेपर [बिग बर्ड: ट्रांसफॉर्मर्स फॉर लॉन्गर सीक्वेंस](https://arxiv.org/abs/2007.14062) मंज़िल ज़हीर, गुरु गुरुगणेश, अविनावा दुबे, जोशुआ आइंस्ली, क्रिस अल्बर्टी, सैंटियागो ओंटानोन, फिलिप फाम, अनिरुद्ध रावुला, किफ़ान वांग, ली यांग, अमर अहमद द्वारा।
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (गूगल रिसर्च से) साथ में पेपर [बिग बर्ड: ट्रांसफॉर्मर्स फॉर लॉन्गर सीक्वेंस](https://arxiv.org/abs/2007.14062) मंज़िल ज़हीर, गुरु गुरुगणेश, अविनावा दुबे, जोशुआ आइंस्ली, क्रिस अल्बर्टी, सैंटियागो ओंटानन, फिलिप फाम, अनिरुद्ध रावुला, किफ़ान वांग, ली यांग, अमर अहमद द्वारा पोस्ट किया गया।
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (फेसबुक से) साथ में कागज [एक ओपन-डोमेन चैटबॉट बनाने की विधि](https://arxiv.org/abs/2004.13637) स्टीफन रोलर, एमिली दीनन, नमन गोयल, दा जू, मैरी विलियमसन, यिनहान लियू, जिंग जू, मायल ओट, कर्ट शस्टर, एरिक एम. स्मिथ, वाई-लैन बॉरो, जेसन वेस्टन द्वारा।
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (फेसबुक से) साथ में पेपर [एक ओपन-डोमेन चैटबॉट बनाने की रेसिपी](https://arxiv.org/abs/2004.13637) स्टीफन रोलर, एमिली दीनन, नमन गोयल, दा जू, मैरी विलियमसन, यिनहान लियू, जिंग जू, मायल ओट, कर्ट शस्टर, एरिक एम स्मिथ, वाई-लैन बॉरो, जेसन वेस्टन द्वारा।
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (Salesforce से) Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi द्वारा अनुसंधान पत्र [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) के साथ जारी किया गया
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (एलेक्सा से) कागज के साथ [बीईआरटी के लिए ऑप्टिमल सबआर्किटेक्चर एक्सट्रैक्शन](https://arxiv.org/abs/2010.10499) एड्रियन डी विंटर और डैनियल जे पेरी द्वारा।
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (हरबिन इंस्टिट्यूट ऑफ़ टेक्नोलॉजी/माइक्रोसॉफ्ट रिसर्च एशिया/इंटेल लैब्स से) कागज के साथ [ब्रिजटॉवर: विजन-लैंग्वेज रिप्रेजेंटेशन लर्निंग में एनकोडर्स के बीच ब्रिज बनाना](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (Google अनुसंधान से) साथ में कागज [ByT5: पूर्व-प्रशिक्षित बाइट-टू-बाइट मॉडल के साथ एक टोकन-मुक्त भविष्य की ओर](https://arxiv.org/abs/2105.13626) Linting Xue, Aditya Barua, Noah Constant, रामी अल-रफू, शरण नारंग, मिहिर काले, एडम रॉबर्ट्स, कॉलिन रैफेल द्वारा पोस्ट किया गया।
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (इनरिया/फेसबुक/सोरबोन से) साथ में कागज [CamemBERT: एक टेस्टी फ्रेंच लैंग्वेज मॉडल](https://arxiv.org/abs/1911.03894) लुई मार्टिन*, बेंजामिन मुलर*, पेड्रो जेवियर ऑर्टिज़ सुआरेज़*, योआन ड्यूपॉन्ट, लॉरेंट रोमरी, एरिक विलेमोन्टे डे ला क्लर्जरी, जैमे सेडाह और बेनोइट सगोट द्वारा।
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (Google रिसर्च से) साथ में दिया गया पेपर [कैनाइन: प्री-ट्रेनिंग ए एफिशिएंट टोकनाइजेशन-फ्री एनकोडर फॉर लैंग्वेज रिप्रेजेंटेशन](https://arxiv.org/abs/2103.06874) जोनाथन एच क्लार्क, डैन गैरेट, यूलिया टर्क, जॉन विएटिंग द्वारा।
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI से) Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov द्वारा अनुसंधान पत्र [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) के साथ जारी किया गया
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (OpenAI से) साथ वाला पेपर [लर्निंग ट्रांसफरेबल विजुअल मॉडल फ्रॉम नेचुरल लैंग्वेज सुपरविजन](https://arxiv.org/abs/2103.00020) एलेक रैडफोर्ड, जोंग वूक किम, क्रिस हैलासी, आदित्य रमेश, गेब्रियल गोह, संध्या अग्रवाल, गिरीश शास्त्री, अमांडा एस्केल, पामेला मिश्किन, जैक क्लार्क, ग्रेचेन क्रुएगर, इल्या सुत्स्केवर द्वारा।
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (सेल्सफोर्स से) साथ में पेपर [प्रोग्राम सिंथेसिस के लिए एक संवादात्मक प्रतिमान](https://arxiv.org/abs/2203.13474) एरिक निजकैंप, बो पैंग, हिरोआकी हयाशी, लिफू तू, हुआन वांग, यिंगबो झोउ, सिल्वियो सावरेस, कैमिंग जिओंग रिलीज।
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (माइक्रोसॉफ्ट रिसर्च एशिया से) कागज के साथ [फास्ट ट्रेनिंग कन्वर्जेंस के लिए सशर्त डीईटीआर](https://arxiv.org/abs/2108.06152) डेपू मेंग, ज़ियाओकांग चेन, ज़ेजिया फैन, गैंग ज़ेंग, होउकियांग ली, युहुई युआन, लेई सन, जिंगडोंग वांग द्वारा।
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (YituTech से) साथ में कागज [ConvBERT: स्पैन-आधारित डायनेमिक कनवल्शन के साथ BERT में सुधार](https://arxiv.org/abs/2008.02496) जिहांग जियांग, वीहाओ यू, डाकान झोउ, युनपेंग चेन, जियाशी फेंग, शुइचेंग यान द्वारा।
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (Facebook AI से) साथ वाला पेपर [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) ज़ुआंग लियू, हेंज़ी माओ, चाओ-युआन वू, क्रिस्टोफ़ फीचटेनहोफ़र, ट्रेवर डेरेल, सैनिंग ज़ी द्वारा।
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (सिंघुआ यूनिवर्सिटी से) साथ में पेपर [सीपीएम: ए लार्ज-स्केल जेनेरेटिव चाइनीज प्री-ट्रेंड लैंग्वेज मॉडल](https://arxiv.org/abs/2012.00413) झेंग्यान झांग, जू हान, हाओ झोउ, पेई के, युक्सियन गु, डेमिंग ये, युजिया किन, युशेंग सु, हाओझे जी, जियान गुआन, फैंचाओ क्यूई, ज़ियाओझी वांग, यानान झेंग, गुओयांग ज़ेंग, हुआनकी काओ, शेंगकी चेन, डाइक्सुआन ली, ज़ेनबो सन, ज़ियुआन लियू, मिनली हुआंग, वेंटाओ हान, जी तांग, जुआनज़ी ली, ज़ियाओयान झू, माओसोंग सन द्वारा।
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by the [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (सेल्सफोर्स से) साथ में पेपर [CTRL: ए कंडिशनल ट्रांसफॉर्मर लैंग्वेज मॉडल फॉर कंट्रोलेबल जेनरेशन](https://arxiv.org/abs/1909.05858) नीतीश शिरीष केसकर*, ब्रायन मैककैन*, लव आर. वार्ष्णेय, कैमिंग जिओंग और रिचर्ड सोचर द्वारा जारी किया गया।
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (Microsoft से) साथ में दिया गया पेपर [CvT: इंट्रोड्यूसिंग कनवॉल्यूशन टू विजन ट्रांसफॉर्मर्स](https://arxiv.org/abs/2103.15808) हैपिंग वू, बिन जिओ, नोएल कोडेला, मेंगचेन लियू, जियांग दाई, लू युआन, लेई झांग द्वारा।
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (फेसबुक से) साथ में कागज [Data2Vec: भाषण, दृष्टि और भाषा में स्व-पर्यवेक्षित सीखने के लिए एक सामान्य ढांचा](https://arxiv.org/abs/2202.03555) एलेक्सी बाएव्स्की, वेई-निंग सू, कियानटोंग जू, अरुण बाबू, जियाताओ गु, माइकल औली द्वारा पोस्ट किया गया।
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (Microsoft से) साथ में दिया गया पेपर [DeBERTa: डिकोडिंग-एन्हांस्ड BERT विद डिसेंटैंगल्ड अटेंशन](https://arxiv.org/abs/2006.03654) पेंगचेंग हे, ज़ियाओडोंग लियू, जियानफेंग गाओ, वीज़ू चेन द्वारा।
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (Microsoft से) साथ में दिया गया पेपर [DeBERTa: डिकोडिंग-एन्हांस्ड BERT विथ डिसेंन्गल्ड अटेंशन](https://arxiv.org/abs/2006.03654) पेंगचेंग हे, ज़ियाओडोंग लियू, जियानफेंग गाओ, वीज़ू चेन द्वारा पोस्ट किया गया।
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (बर्कले/फेसबुक/गूगल से) पेपर के साथ [डिसीजन ट्रांसफॉर्मर: रीनफोर्समेंट लर्निंग वाया सीक्वेंस मॉडलिंग](https://arxiv.org/abs/2106.01345) लिली चेन, केविन लू, अरविंद राजेश्वरन, किमिन ली, आदित्य ग्रोवर, माइकल लास्किन, पीटर एबील, अरविंद श्रीनिवास, इगोर मोर्डच द्वारा पोस्ट किया गया।
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (सेंसटाइम रिसर्च से) साथ में पेपर [डिफॉर्मेबल डीईटीआर: डिफॉर्मेबल ट्रांसफॉर्मर्स फॉर एंड-टू-एंड ऑब्जेक्ट डिटेक्शन] (https://arxiv.org/abs/2010.04159) Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, जिफेंग दाई द्वारा पोस्ट किया गया।
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (फेसबुक से) साथ में पेपर [ट्रेनिंग डेटा-एफिशिएंट इमेज ट्रांसफॉर्मर और डिस्टिलेशन थ्रू अटेंशन](https://arxiv.org/abs/2012.12877) ह्यूगो टौव्रोन, मैथ्यू कॉर्ड, मैथिज्स डूज़, फ़्रांसिस्को मस्सा, एलेक्ज़ेंडर सबलेरोल्स, हर्वे जेगौ द्वारा।
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (Google AI से) Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun द्वारा अनुसंधान पत्र [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) के साथ जारी किया गया
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (फेसबुक से) साथ में कागज [ट्रांसफॉर्मर्स के साथ एंड-टू-एंड ऑब्जेक्ट डिटेक्शन](https://arxiv.org/abs/2005.12872) निकोलस कैरियन, फ़्रांसिस्को मस्सा, गेब्रियल सिनेव, निकोलस उसुनियर, अलेक्जेंडर किरिलोव, सर्गेई ज़ागोरुयको द्वारा।
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (माइक्रोसॉफ्ट रिसर्च से) कागज के साथ [DialoGPT: बड़े पैमाने पर जनरेटिव प्री-ट्रेनिंग फॉर कन्वर्सेशनल रिस्पांस जेनरेशन](https://arxiv.org/abs/1911.00536) यिज़े झांग, सिकी सन, मिशेल गैली, येन-चुन चेन, क्रिस ब्रोकेट, जियांग गाओ, जियानफेंग गाओ, जिंगजिंग लियू, बिल डोलन द्वारा।
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi.
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (हगिंगफेस से), साथ में कागज [डिस्टिलबर्ट, बीईआरटी का डिस्टिल्ड वर्जन: छोटा, तेज, सस्ता और हल्का](https://arxiv.org/abs/1910.01108) विक्टर सनह, लिसांड्रे डेब्यू और थॉमस वुल्फ द्वारा पोस्ट किया गया। यही तरीका GPT-2 को [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa को [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), बहुभाषी BERT को [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) और डिस्टिलबर्ट के जर्मन संस्करण में कंप्रेस करने के लिए भी लागू किया जाता है।
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (माइक्रोसॉफ्ट रिसर्च से) साथ में पेपर [DiT: सेल्फ सुपरवाइज्ड प्री-ट्रेनिंग फॉर डॉक्यूमेंट इमेज ट्रांसफॉर्मर](https://arxiv.org/abs/2203.02378) जुनलॉन्ग ली, यिहेंग जू, टेंगचाओ लव, लेई कुई, चा झांग द्वारा फुरु वेई द्वारा पोस्ट किया गया।
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (NAVER से) साथ में कागज [OCR-मुक्त डॉक्यूमेंट अंडरस्टैंडिंग ट्रांसफॉर्मर](https://arxiv.org/abs/2111.15664) गीवूक किम, टीकग्यू होंग, मूनबिन यिम, जियोंग्योन नाम, जिनयॉन्ग पार्क, जिनयॉन्ग यिम, वोनसेओक ह्वांग, सांगडू यूं, डोंगयून हान, सेउंग्युन पार्क द्वारा।
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (फेसबुक से) साथ में पेपर [ओपन-डोमेन क्वेश्चन आंसरिंग के लिए डेंस पैसेज रिट्रीवल](https://arxiv.org/abs/2004.04906) व्लादिमीर करपुखिन, बरलास ओज़ुज़, सेवन मिन, पैट्रिक लुईस, लेडेल वू, सर्गेई एडुनोव, डैनकी चेन, और वेन-ताऊ यिह द्वारा।
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (इंटेल लैब्स से) साथ में कागज [विज़न ट्रांसफॉर्मर्स फॉर डेंस प्रेडिक्शन](https://arxiv.org/abs/2103.13413) रेने रैनफ्टल, एलेक्सी बोचकोवस्की, व्लादलेन कोल्टन द्वारा।
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (Google रिसर्च/स्टैनफोर्ड यूनिवर्सिटी से) साथ में दिया गया पेपर [इलेक्ट्रा: जेनरेटर के बजाय भेदभाव करने वाले के रूप में टेक्स्ट एन्कोडर्स का पूर्व-प्रशिक्षण](https://arxiv.org/abs/2003.10555) केविन क्लार्क, मिन्ह-थांग लुओंग, क्वोक वी. ले, क्रिस्टोफर डी. मैनिंग द्वारा पोस्ट किया गया।
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (Meta AI से) Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi द्वारा अनुसंधान पत्र [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) के साथ जारी किया गया
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (Google रिसर्च से) साथ में दिया गया पेपर [सीक्वेंस जेनरेशन टास्क के लिए प्री-ट्रेंड चेकपॉइंट का इस्तेमाल करना](https://arxiv.org/abs/1907.12461) साशा रोठे, शशि नारायण, अलियाक्सि सेवेरिन द्वारा।
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)**(Baidu से) साथ देने वाला पेपर [ERNIE: एन्हांस्ड रिप्रेजेंटेशन थ्रू नॉलेज इंटीग्रेशन](https://arxiv.org/abs/1904.09223) यू सन, शुओहुआन वांग, युकुन ली, शिकुन फेंग, ज़ुई चेन, हान झांग, शिन तियान, डैनक्सियांग झू, हाओ तियान, हुआ वू द्वारा पोस्ट किया गया।
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (Baidu से) Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang द्वारा अनुसंधान पत्र [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) के साथ जारी किया गया
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (मेटा AI से) ट्रांसफॉर्मर प्रोटीन भाषा मॉडल हैं। **ESM-1b** को अलेक्जेंडर राइव्स, जोशुआ मेयर, टॉम सर्कु, सिद्धार्थ गोयल, ज़ेमिंग लिन, जेसन लियू, डेमी गुओ, मायल ओट, सी. लॉरेंस ज़िटनिक, जेरी मा और रॉब फर्गस के पेपर [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) के साथ जारी किया गया था। **ESM-1v** को जोशुआ मेयर, रोशन राव, रॉबर्ट वेरकुइल, जेसन लियू, टॉम सर्कु और अलेक्जेंडर राइव्स के पेपर [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) के साथ जारी किया गया था। **ESM-2** को ज़ेमिंग लिन, हलील अकिन, रोशन राव, ब्रायन ही, झोंगकाई झू, वेंटिंग लू, अलान डॉस सैंटोस कोस्टा, मरियम फ़ज़ल-ज़रंडी, टॉम सर्कू, साल कैंडिडो, अलेक्जेंडर राइव्स के पेपर [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) के साथ जारी किया गया था।
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (CNRS से) साथ वाला पेपर [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, बेंजामिन लेकोउटेक्स, अलेक्जेंड्रे अल्लाउज़ेन, बेनोइट क्रैबे, लॉरेंट बेसेसियर, डिडिएर श्वाब द्वारा।
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (Facebook AI से) साथ वाला पेपर [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) अमनप्रीत सिंह, रोंगहांग हू, वेदानुज गोस्वामी, गुइल्यूम कुएरॉन, वोज्शिएक गालुबा, मार्कस रोहरबैक और डौवे कीला द्वारा।
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (गूगल रिसर्च से) साथ वाला पेपर [FNet: मिक्सिंग टोकन विद फूरियर ट्रांसफॉर्म्स](https://arxiv.org/abs/2105.03824) जेम्स ली-थॉर्प, जोशुआ आइंस्ली, इल्या एकस्टीन, सैंटियागो ओंटानन द्वारा।
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (Microsoft Research से) Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao द्वारा अनुसंधान पत्र [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) के साथ जारी किया गया
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (सीएमयू/गूगल ब्रेन से) साथ में कागज [फ़नल-ट्रांसफॉर्मर: कुशल भाषा प्रसंस्करण के लिए अनुक्रमिक अतिरेक को छानना](https://arxiv.org/abs/2006.03236) जिहांग दाई, गुओकुन लाई, यिमिंग यांग, क्वोक वी. ले द्वारा जारी।
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (KAIST से) साथ वाला पेपर [वर्टिकल कटडेप्थ के साथ मोनोकुलर डेप्थ एस्टीमेशन के लिए ग्लोबल-लोकल पाथ नेटवर्क्स](https://arxiv.org/abs/2201.07436) डोयोन किम, वूंगह्युन गा, प्युंगवान आह, डोंगग्यू जू, सेहवान चुन, जुनमो किम द्वारा।
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (OpenAI से) साथ में दिया गया पेपर [जेनरेटिव प्री-ट्रेनिंग द्वारा भाषा की समझ में सुधार](https://blog.openai.com/language-unsupervised/) एलेक रैडफोर्ड, कार्तिक नरसिम्हन, टिम सालिमन्स और इल्या सुत्स्केवर द्वारा।
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (EleutherAI से) रिपॉजिटरी [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) के साथ जारी। सिड ब्लैक, स्टेला बिडरमैन, लियो गाओ, फिल वांग और कॉनर लेही द्वारा पोस्ट किया गया।
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (EleutherAI से) पेपर [GPT-NeoX-20B: एक ओपन-सोर्स ऑटोरेग्रेसिव लैंग्वेज मॉडल](https://arxiv.org/abs/2204.06745) के साथ जारी किया गया। सिड ब्लैक, स्टेला बिडरमैन, एरिक हैलाहन, क्वेंटिन एंथोनी, लियो गाओ, लॉरेंस गोल्डिंग, होरेस हे, कॉनर लेही, काइल मैकडोनेल, जेसन फांग, माइकल पाइलर, यूएसवीएसएन साई प्रशांत, शिवांशु पुरोहित, लारिया रेनॉल्ड्स, जोनाथन टो, बेन वांग, सैमुअल वेनबैक द्वारा।
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (अबेजा के जरिए) शिन्या ओटानी, ताकायोशी मकाबे, अनुज अरोड़ा, क्यो हटोरी द्वारा।
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (ओपनएआई से) साथ में पेपर [लैंग्वेज मॉडल्स अनसुपरवाइज्ड मल्टीटास्क लर्नर्स हैं](https://blog.openai.com/better-language-models/) एलेक रैडफोर्ड, जेफरी वू, रेवन चाइल्ड, डेविड लुआन, डारियो एमोडी और इल्या सुत्सकेवर द्वारा पोस्ट किया गया।
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (EleutherAI से) रिपॉजिटरी [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) के साथ जारी। बेन वांग और अरन कोमात्सुजाकी द्वारा।
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (BigCode से) Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra द्वारा अनुसंधान पत्र [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) के साथ जारी किया गया
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto(tanreinama).
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (UCSD, NVIDIA से) साथ में कागज [GroupViT: टेक्स्ट सुपरविजन से सिमेंटिक सेगमेंटेशन इमर्जेस](https://arxiv.org/abs/2202.11094) जियारुई जू, शालिनी डी मेलो, सिफ़ी लियू, वोनमिन बायन, थॉमस ब्रेउएल, जान कौट्ज़, ज़ियाओलोंग वांग द्वारा।
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (फेसबुक से) साथ में पेपर [ह्यूबर्ट: सेल्फ सुपरवाइज्ड स्पीच रिप्रेजेंटेशन लर्निंग बाय मास्क्ड प्रेडिक्शन ऑफ हिडन यूनिट्स](https://arxiv.org/abs/2106.07447) वेई-निंग सू, बेंजामिन बोल्टे, याओ-हंग ह्यूबर्ट त्साई, कुशाल लखोटिया, रुस्लान सालाखुतदीनोव, अब्देलरहमान मोहम्मद द्वारा।
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (बर्कले से) साथ में कागज [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) सेहून किम, अमीर घोलमी, ज़ेवेई याओ, माइकल डब्ल्यू महोनी, कर्ट केटज़र द्वारा।
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (Salesforce से) Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi द्वारा अनुसंधान पत्र [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) के साथ जारी किया गया
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (माइक्रोसॉफ्ट रिसर्च एशिया से) साथ देने वाला पेपर [लेआउटएलएमवी3: यूनिफाइड टेक्स्ट और इमेज मास्किंग के साथ दस्तावेज़ एआई के लिए पूर्व-प्रशिक्षण](https://arxiv.org/abs/2204.08387) युपन हुआंग, टेंगचाओ लव, लेई कुई, युटोंग लू, फुरु वेई द्वारा पोस्ट किया गया।
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (मेटा AI से) साथ वाला पेपर [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) बेन ग्राहम, अलाएल्डिन एल-नौबी, ह्यूगो टौवरन, पियरे स्टॉक, आर्मंड जौलिन, हर्वे जेगौ, मैथिज डूज़ द्वारा।
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (दक्षिण चीन प्रौद्योगिकी विश्वविद्यालय से) साथ में कागज [LiLT: एक सरल लेकिन प्रभावी भाषा-स्वतंत्र लेआउट ट्रांसफार्मर संरचित दस्तावेज़ समझ के लिए](https://arxiv.org/abs/2202.13669) जियापेंग वांग, लियानवेन जिन, काई डिंग द्वारा पोस्ट किया गया।
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (The FAIR team of Meta AI से) Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample द्वारा अनुसंधान पत्र [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) के साथ जारी किया गया
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (The FAIR team of Meta AI से) Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom द्वारा अनुसंधान पत्र [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) के साथ जारी किया गया
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (Google AI से) साथ में पेपर [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) मैंडी गुओ, जोशुआ आइंस्ली, डेविड यूथस, सैंटियागो ओंटानन, जियानमो नि, यूं-हुआन सुंग, यिनफेई यांग द्वारा पोस्ट किया गया।
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (स्टूडियो औसिया से) साथ में पेपर [LUKE: डीप कॉन्टेक्स्टुअलाइज्ड एंटिटी रिप्रेजेंटेशन विद एंटिटी-अवेयर सेल्फ-अटेंशन](https://arxiv.org/abs/2010.01057) Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto द्वारा।
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (UNC चैपल हिल से) साथ में पेपर [LXMERT: ओपन-डोमेन क्वेश्चन के लिए ट्रांसफॉर्मर से क्रॉस-मोडलिटी एनकोडर रिप्रेजेंटेशन सीखना Answering](https://arxiv.org/abs/1908.07490) हाओ टैन और मोहित बंसल द्वारा।
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (फेसबुक से) साथ देने वाला पेपर [बियॉन्ड इंग्लिश-सेंट्रिक मल्टीलिंगुअल मशीन ट्रांसलेशन](https://arxiv.org/abs/2010.11125) एंजेला फैन, श्रुति भोसले, होल्गर श्वेन्क, झी मा, अहमद अल-किश्की, सिद्धार्थ गोयल, मनदीप बैनेस, ओनूर सेलेबी, गुइल्लाम वेन्जेक, विश्रव चौधरी, नमन गोयल, टॉम बर्च, विटाली लिपचिंस्की, सर्गेई एडुनोव, एडौर्ड ग्रेव, माइकल औली, आर्मंड जौलिन द्वारा पोस्ट किया गया।
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Jörg Tiedemann द्वारा [OPUS](http://opus.nlpl.eu/) डेटा से प्रशिक्षित मशीनी अनुवाद मॉडल। [मैरियन फ्रेमवर्क](https://marian-nmt.github.io/) माइक्रोसॉफ्ट ट्रांसलेटर टीम द्वारा विकसित किया गया है।
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (माइक्रोसॉफ्ट रिसर्च एशिया से) साथ में पेपर [मार्कअपएलएम: विजुअली-रिच डॉक्यूमेंट अंडरस्टैंडिंग के लिए टेक्स्ट और मार्कअप लैंग्वेज का प्री-ट्रेनिंग](https://arxiv.org/abs/2110.08518) जुनलॉन्ग ली, यिहेंग जू, लेई कुई, फुरु वेई द्वारा पोस्ट किया गया।
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (FAIR and UIUC से) Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar द्वारा अनुसंधान पत्र [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) के साथ जारी किया गया
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (मेटा और UIUC से) पेपर [प्रति-पिक्सेल वर्गीकरण वह सब नहीं है जिसकी आपको सिमेंटिक सेगमेंटेशन की आवश्यकता है](https://arxiv.org/abs/2107.06278) के साथ जारी किया गया, बोवेन चेंग, अलेक्जेंडर जी. श्विंग, अलेक्जेंडर किरिलोव द्वारा।
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (Google AI से) Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos द्वारा अनुसंधान पत्र [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) के साथ जारी किया गया
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (फेसबुक से) साथ में पेपर [न्यूरल मशीन ट्रांसलेशन के लिए मल्टीलिंगुअल डीनोइजिंग प्री-ट्रेनिंग](https://arxiv.org/abs/2001.08210) यिनहान लियू, जियाताओ गु, नमन गोयल, जियान ली, सर्गेई एडुनोव, मार्जन ग़ज़विनिनेजाद, माइक लुईस, ल्यूक ज़ेटलमॉयर द्वारा।
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (फेसबुक से) साथ में पेपर [एक्स्टेंसिबल बहुभाषी प्रीट्रेनिंग और फाइनट्यूनिंग के साथ बहुभाषी अनुवाद](https://arxiv.org/abs/2008.00401) युकिंग टैंग, चाउ ट्रान, जियान ली, पेंग-जेन चेन, नमन गोयल, विश्रव चौधरी, जियाताओ गु, एंजेला फैन द्वारा।
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (Facebook से) Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer द्वारा अनुसंधान पत्र [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) के साथ जारी किया गया
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (NVIDIA से) कागज के साथ [Megatron-LM: मॉडल का उपयोग करके बहु-अरब पैरामीटर भाषा मॉडल का प्रशिक्षण Parallelism](https://arxiv.org/abs/1909.08053) मोहम्मद शोएबी, मोस्टोफा पटवारी, राउल पुरी, पैट्रिक लेग्रेस्ले, जेरेड कैस्पर और ब्रायन कैटानज़ारो द्वारा।
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (NVIDIA से) साथ वाला पेपर [Megatron-LM: ट्रेनिंग मल्टी-बिलियन पैरामीटर लैंग्वेज मॉडल्स यूजिंग मॉडल पैरेललिज़्म] (https://arxiv.org/abs/1909.08053) मोहम्मद शोएबी, मोस्टोफा पटवारी, राउल पुरी, पैट्रिक लेग्रेस्ले, जेरेड कैस्पर और ब्रायन कैटानज़ारो द्वारा पोस्ट किया गया।
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (Alibaba Research से) Peng Wang, Cheng Da, and Cong Yao द्वारा अनुसंधान पत्र [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) के साथ जारी किया गया
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (फ्रॉम Studio Ousia) साथ में पेपर [mLUKE: द पावर ऑफ एंटिटी रिप्रेजेंटेशन इन मल्टीलिंगुअल प्रीट्रेन्ड लैंग्वेज मॉडल्स](https://arxiv.org/abs/2110.08151) रयोकन री, इकुया यामाडा, और योशिमासा त्सुरोका द्वारा।
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (Facebook से) Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli द्वारा अनुसंधान पत्र [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) के साथ जारी किया गया
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (सीएमयू/गूगल ब्रेन से) साथ में कागज [मोबाइलबर्ट: संसाधन-सीमित उपकरणों के लिए एक कॉम्पैक्ट टास्क-अज्ञेय बीईआरटी] (https://arxiv.org/abs/2004.02984) Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, और Denny Zhou द्वारा पोस्ट किया गया।
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
|
||||||
|
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (Apple से) साथ में कागज [MobileViT: लाइट-वेट, जनरल-पर्पस, और मोबाइल-फ्रेंडली विजन ट्रांसफॉर्मर] (https://arxiv.org/abs/2110.02178) सचिन मेहता और मोहम्मद रस्तगरी द्वारा पोस्ट किया गया।
|
||||||
|
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (Apple से) Sachin Mehta and Mohammad Rastegari. द्वाराअनुसंधान पत्र [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) के साथ जारी किया गया
|
||||||
|
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
|
||||||
|
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (the University of Wisconsin - Madison से) Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. द्वाराअनुसंधान पत्र [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) के साथ जारी किया गया
|
||||||
|
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah's Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by HuiSu, WeiweiShi, XiaoyuShen, XiaoZhou, TuoJi, JiaruiFang, JieZhou.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from Zhuiyi Technology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released with [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook) released with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (from Meta AI) released with the paper [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu.
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) directory of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedback before starting your PR.

To check whether each model already has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).

These implementations have been tested on several datasets (see the example scripts) and should perform comparably to the original implementations. You can read details on their behavior in [this section](https://huggingface.co/docs/transformers/examples) of the documentation.

## Learn more

| Section | Description |
|-|-|
| [Documentation](https://huggingface.co/transformers/) | Full API documentation and tutorials |
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by Transformers in a PyTorch/TensorFlow training loop or with the `Trainer` API |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrating to Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |

## Citation

We have now officially published a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) for this library; if you use the Transformers library, please cite it:

```bibtex
@inproceedings{wolf-etal-2020-transformers,
    title = "Transformers: State-of-the-Art Natural Language Processing",
    author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = oct,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
    pages = "38--45"
}
```

549 README_ja.md Normal file
@@ -0,0 +1,549 @@

<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<!---
A useful guide for English-Japanese translation of the Hugging Face documentation
- Use square quotes, e.g.,「引用」

Dictionary

API: API (do not translate)
add: 追加
checkpoint: チェックポイント
code: コード
community: コミュニティ
confidence: 信頼度
dataset: データセット
documentation: ドキュメント
example: 例
finetune: 微調整
Hugging Face: Hugging Face (do not translate)
implementation: 実装
inference: 推論
library: ライブラリ
module: モジュール
NLP/Natural Language Processing: left untranslated when written as NLP; translated when written as Natural Language Processing
online demos: オンラインデモ
pipeline: pipeline (do not translate)
pretrained/pretrain: 学習済み
Python data structures (e.g., list, set, dict): translated as リスト, セット, ディクショナリ, with the original English in parentheses
repository: repository (do not translate)
summary: 概要
token-: token- (do not translate)
Trainer: Trainer (do not translate)
transformer: transformer (do not translate)
tutorial: チュートリアル
user: ユーザ
-->

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
    <br>
</p>
<p align="center">
    <a href="https://circleci.com/gh/huggingface/transformers">
        <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
    </a>
    <a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
        <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
    </a>
    <a href="https://huggingface.co/docs/transformers/index">
        <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
    </a>
    <a href="https://github.com/huggingface/transformers/releases">
        <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
    </a>
    <a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
        <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
    </a>
    <a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
</p>

<h4 align="center">
    <p>
        <a href="https://github.com/huggingface/transformers/">English</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
        <b>日本語</b> |
        <a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a>
    </p>
</h4>

<h3 align="center">
    <p>State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow</p>
</h3>

<h3 align="center">
    <a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>

🤗Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.

These models can be applied on:

* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, and text generation, in over 100 languages.
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
* 🗣️ Audio, for tasks like speech recognition and audio classification.

Transformer models can also perform tasks on **several modalities combined**, such as table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.

🤗Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets, and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each Python module defining an architecture is fully standalone and can be modified to enable quick research experiments.

🤗Transformers is backed by the three major deep learning libraries, [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/), with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.

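To make that last point concrete, here is a minimal sketch (our own illustration, not part of the original README) of moving one checkpoint between frameworks: a model saved from the PyTorch class can be reloaded with the TensorFlow class by passing `from_pt=True`, which converts the weights on the fly.

```python
# Illustrative sketch (assumes both torch and tensorflow are installed).
from transformers import AutoModel, TFAutoModel

# Load (or fine-tune) the model in PyTorch, then save it to disk.
pt_model = AutoModel.from_pretrained("bert-base-uncased")
pt_model.save_pretrained("./my-bert")  # writes pytorch_model.bin + config.json

# Reload the same checkpoint in TensorFlow; from_pt=True converts the weights.
tf_model = TFAutoModel.from_pretrained("./my-bert", from_pt=True)
```
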
## Online demos

You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models.

Here are a few examples:

In Natural Language Processing:
- [Masked word completion with BERT](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
- [Name Entity Recognition with Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
- [Text generation with GPT-2](https://huggingface.co/gpt2?text=A+long+time+ago%2C+)
- [Natural Language Inference with RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [Summarization with BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
- [Question answering with DistilBERT](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
- [Translation with T5](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)

In Computer Vision:
- [Image classification with ViT](https://huggingface.co/google/vit-base-patch16-224)
- [Object Detection with DETR](https://huggingface.co/facebook/detr-resnet-50)
- [Semantic Segmentation with SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
- [Panoptic Segmentation with DETR](https://huggingface.co/facebook/detr-resnet-50-panoptic)

In Audio:
- [Automatic Speech Recognition with Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h)
- [Keyword Spotting with Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks)

In Multimodal tasks:
- [Visual Question Answering with ViLT](https://huggingface.co/dandelin/vilt-b32-finetuned-vqa)

**[Write With Transformer](https://transformer.huggingface.co)**, built by the Hugging Face team, is the official demo of this repo's text generation capabilities.

## If you are looking for custom support from the Hugging Face team

<a target="_blank" href="https://huggingface.co/support">
    <img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
</a><br>

## Quick tour

To immediately use a model on a given input (text, image, audio, ...), we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. Here is how to quickly use a pipeline to classify positive versus negative texts:

```python
>>> from transformers import pipeline

# Allocate a pipeline for sentiment-analysis
>>> classifier = pipeline('sentiment-analysis')
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
```

The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here the answer is "positive" with a confidence of 99.97%.

Many tasks have a pretrained `pipeline` ready to go, in computer vision and speech as well as natural language processing. For example, we can easily extract the objects detected in an image:

```python
>>> import requests
>>> from PIL import Image
>>> from transformers import pipeline

# Download an image with cute cats
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png"
>>> image_data = requests.get(url, stream=True).raw
>>> image = Image.open(image_data)

# Allocate a pipeline for object detection
>>> object_detector = pipeline('object-detection')
>>> object_detector(image)
[{'score': 0.9982201457023621,
  'label': 'remote',
  'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
 {'score': 0.9960021376609802,
  'label': 'remote',
  'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
 {'score': 0.9954745173454285,
  'label': 'couch',
  'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
 {'score': 0.9988006353378296,
  'label': 'cat',
  'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
 {'score': 0.9986783862113953,
  'label': 'cat',
  'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}]
```

Here we get a list of objects detected in the image, with a box surrounding each object and a confidence score. The original image is shown on the left, with the predictions displayed on the right:

<h3 align="center">
    <a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png" width="400"></a>
    <a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample_post_processed.png" width="400"></a>
</h3>

You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/docs/transformers/task_summary).

In addition to `pipeline`, downloading and using any of the pretrained models on your given task takes only three lines of code. Here is the PyTorch version:
```python
>>> from transformers import AutoTokenizer, AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = AutoModel.from_pretrained("bert-base-uncased")

>>> inputs = tokenizer("Hello world!", return_tensors="pt")
>>> outputs = model(**inputs)
```

And here is the equivalent code for TensorFlow:
```python
>>> from transformers import AutoTokenizer, TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = TFAutoModel.from_pretrained("bert-base-uncased")

>>> inputs = tokenizer("Hello world!", return_tensors="tf")
>>> outputs = model(**inputs)
```

The tokenizer is responsible for all the preprocessing the pretrained model expects and can be called directly on a single string (as in the examples above) or a list. It outputs a dictionary that you can use in downstream code or simply pass directly to your model using the ** argument unpacking operator, as the sketch below illustrates.

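As a small illustration of that point (our own example, not part of the original README), the tokenizer also accepts a list of strings, and `padding`/`truncation` turn sentences of different lengths into a single batch whose dictionary unpacks straight into the model:

```python
# Illustrative sketch: batch tokenization plus ** unpacking into the model.
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

batch = tokenizer(
    ["Hello world!", "Transformer models handle batches too."],
    padding=True,       # pad the shorter sentence up to the longer one
    truncation=True,    # clip anything beyond the model's maximum length
    return_tensors="pt",
)
outputs = model(**batch)  # the output dict of the tokenizer unpacks directly
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```
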
The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use as usual. [This tutorial](https://huggingface.co/docs/transformers/training) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset.

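For the `Trainer` route, a minimal fine-tuning sketch looks roughly like the following; the dataset choice (`imdb`) and the tiny training slice are illustrative assumptions on our part, not prescribed by this README:

```python
# Rough fine-tuning sketch with the Trainer API (assumes `datasets` is installed).
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

# A tiny slice of IMDB, tokenized; the 'label' column is used as the target.
dataset = load_dataset("imdb", split="train[:1%]")
dataset = dataset.map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="./out", num_train_epochs=1),
    train_dataset=dataset,
    tokenizer=tokenizer,  # enables dynamic padding through the default collator
)
trainer.train()
```
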
## Why should I use transformers?

1. Easy-to-use state-of-the-art models:
    - High performance on natural language understanding & generation, computer vision, and audio tasks.
    - Low barrier to entry for educators and practitioners.
    - Few user-facing abstractions with just three classes to learn.
    - A unified API for using all our pretrained models.

1. Lower compute costs, smaller carbon footprint:
    - Researchers can share trained models instead of always retraining.
    - Practitioners can reduce compute time and production costs.
    - Numerous architectures with over 60,000 pretrained models across all modalities.

1. Choose the right framework for every part of a model's lifetime:
    - Train state-of-the-art models in 3 lines of code.
    - Move a single model between TF2.0/PyTorch/JAX frameworks at will.
    - Seamlessly pick the right framework for training, evaluation, and production.

1. Easily customize a model or an example to your needs:
    - We provide examples for each architecture to reproduce the results published by its original authors.
    - Model internals are exposed as consistently as possible.
    - Model files can be used independently of the library for quick experiments.

## Why shouldn't I use transformers?

- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is deliberately not refactored with additional abstractions, so that researchers can quickly iterate on each of the models without diving into extra abstractions/files.
- The training API is not intended to work on any model; it is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library (possibly [Accelerate](https://huggingface.co/docs/accelerate)).
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out of the box on your specific problem and that you will need to change a few lines of code to adapt them to your needs.

## Installation

### With pip

This repository is tested on Python 3.6+, Flax 0.3.2+, PyTorch 1.3.1+ and TensorFlow 2.3+.

You should install 🤗Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

First, create a virtual environment with the version of Python you're going to use and activate it.

Then, you will need to install at least one of Flax, PyTorch or TensorFlow.
Please refer to the [TensorFlow installation page](https://www.tensorflow.org/install/), the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or the [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages for the specific installation command for your platform.

When one of those backends has been installed, 🤗Transformers can be installed using pip as follows:

```bash
pip install transformers
```

If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/docs/transformers/installation#installing-from-source).

### With conda

Since Transformers version 4.0.0, we have a conda channel: `huggingface`.

🤗Transformers can be installed using conda as follows:

```bash
conda install -c huggingface transformers
```

Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda.

> **_NOTE:_** On Windows, you may be prompted to activate Developer Mode in order to benefit from caching. If this is not an option for you, please let us know in [this issue](https://github.com/huggingface/huggingface_hub/issues/1062).

## Model architectures

**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co), where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).

Current number of checkpoints: 

🤗Transformers currently provides the following architectures (see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each of them):

1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (Google Research and the Toyota Technological Institute at Chicago から) Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut から公開された研究論文: [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942)
|
||||||
|
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (Google Research から) Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig. から公開された研究論文 [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918)
|
||||||
|
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (BAAI から) Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell から公開された研究論文: [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679)
|
||||||
|
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (MIT から) Yuan Gong, Yu-An Chung, James Glass から公開された研究論文: [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778)
|
||||||
|
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
|
||||||
|
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
|
||||||
|
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (Facebook から) Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer から公開された研究論文: [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461)
|
||||||
|
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (École polytechnique から) Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis から公開された研究論文: [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321)
|
||||||
|
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (VinAI Research から) Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen から公開された研究論文: [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701)
|
||||||
|
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (Microsoft から) Hangbo Bao, Li Dong, Furu Wei から公開された研究論文: [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254)
|
||||||
|
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (Google から) Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova から公開された研究論文: [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)
|
||||||
|
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (Google から) Sascha Rothe, Shashi Narayan, Aliaksei Severyn から公開された研究論文: [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461)
|
||||||
|
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (VinAI Research から) Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen から公開された研究論文: [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/)
|
||||||
|
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (Google Research から) Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed から公開された研究論文: [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062)
|
||||||
|
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (Google Research から) Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed から公開された研究論文: [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062)
|
||||||
|
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (Microsoft Research AI4Science から) Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu から公開された研究論文: [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9)
|
||||||
|
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (Google AI から) Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil から公開された研究論文: [Big Transfer (BiT)](https://arxiv.org/abs/1912.11370)Houlsby.
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (from Google AI) released with the paper [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) by Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun.
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi.
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2, RoBERTa and Multilingual BERT; the compressed models were named [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), respectively.
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (from Meta AI) released with the paper [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) by Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi.
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (from Baidu) released with the paper [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) by Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** and **ESMFold** were released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei.
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto (tanreinama).
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (from Facebook) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Platforms) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by Hui Su, Weiwei Shi, Xiaoyu Shen, Xiao Zhou, Tuo Ji, Jiarui Fang, Jie Zhou.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng), released on [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill), released together with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
||||||
|
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (NAVER AI Lab/Kakao Enterprise/Kakao Brain から) Wonjae Kim, Bokyung Son, Ildoo Kim から公開された研究論文: [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334)
|
||||||
|
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (Google AI から) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby から公開された研究論文: [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
|
||||||
|
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (UCLA NLP から) Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang から公開された研究論文: [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557)
|
||||||
|
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (Google AI から) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby から公開された研究論文: [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
|
||||||
|
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (Meta AI から) Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick から公開された研究論文: [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
|
||||||
|
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (Meta AI から) Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas から公開された研究論文: [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141)
|
||||||
|
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
|
||||||
|
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (Facebook AI から) Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli から公開された研究論文: [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477)
|
||||||
|
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (Facebook AI から) Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino から公開された研究論文: [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171)
|
||||||
|
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (Facebook AI から) Qiantong Xu, Alexei Baevski, Michael Auli から公開された研究論文: [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680)
|
||||||
|
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (Microsoft Research から) Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei から公開された研究論文: [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900)
|
||||||
|
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (OpenAI から) Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever から公開された研究論文: [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf)
|
||||||
|
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (Microsoft Research から) Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling から公開された研究論文: [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816)
|
||||||
|
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (Meta AI から) Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe. から公開された研究論文 [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255)
|
||||||
|
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li から公開された研究論文: [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668)
|
||||||
|
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (Facebook から) Guillaume Lample and Alexis Conneau から公開された研究論文: [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291)
|
||||||
|
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (Microsoft Research から) Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou から公開された研究論文: [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063)
|
||||||
|
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (Facebook AI から), Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov から公開された研究論文: [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116)
|
||||||
|
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (Facebook AI から), Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau から公開された研究論文: [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572)
|
||||||
|
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (Meta AI から) Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa から公開された研究論文: [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472)
|
||||||
|
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (Google/CMU から) Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le から公開された研究論文: [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237)
|
||||||
|
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (Facebook AI から) Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli から公開された研究論文: [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296)
|
||||||
|
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (Facebook AI から) Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli から公開された研究論文: [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979)
|
||||||
|
1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (Huazhong University of Science & Technology から) Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu から公開された研究論文: [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666)
|
||||||
|
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (the University of Wisconsin - Madison から) Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh から公開された研究論文: [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714)
|
||||||
|
1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedback before starting your PR.
To check whether each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks). The same information can also be checked at runtime, as in the sketch below.
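As a minimal sketch of such a runtime check (the `bert-base-uncased` checkpoint is only an illustrative choice):

```python
from transformers import AutoTokenizer
from transformers.utils import is_flax_available, is_tf_available, is_torch_available

# Report which deep-learning backends are usable in this environment.
print("PyTorch available:", is_torch_available())
print("TensorFlow available:", is_tf_available())
print("Flax available:", is_flax_available())

# `is_fast` is True when the tokenizer is backed by the Rust 🤗 Tokenizers library.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
print("Backed by 🤗 Tokenizers:", tokenizer.is_fast)
```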
These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://github.com/huggingface/transformers/tree/main/examples).
## Learn more

| Section | Description |
|-|-|
| [Documentation](https://huggingface.co/docs/transformers/) | Full API documentation and tutorials |
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models (see the sketch after this table) |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and with the `Trainer` API (also illustrated after this table) |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrate to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |
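To make the preprocessing and training rows above concrete, here is a minimal sketch that tokenizes raw text and wires a model into the `Trainer` API; the checkpoint name and the tiny in-memory dataset are illustrative assumptions, not a prescribed setup (real runs typically use 🤗 Datasets):

```python
import torch
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

checkpoint = "distilbert-base-uncased"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

# Preprocessing: the tokenizer pads/truncates raw text into model-ready tensors.
texts = ["I love this library!", "This bug is annoying."]
encodings = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")

# A tiny in-memory dataset, just to make the example self-contained.
class ToyDataset(torch.utils.data.Dataset):
    def __init__(self, encodings, labels):
        self.encodings, self.labels = encodings, labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        item = {key: val[idx] for key, val in self.encodings.items()}
        item["labels"] = torch.tensor(self.labels[idx])
        return item

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="out", num_train_epochs=1),
    train_dataset=ToyDataset(encodings, [1, 0]),
)
trainer.train()
```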
## Citation

We now have a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) you can cite for the 🤗 Transformers library:
```bibtex
@inproceedings{wolf-etal-2020-transformers,
    title = "Transformers: State-of-the-Art Natural Language Processing",
    author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = oct,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
    pages = "38--45"
}
```
316 README_ko.md
@@ -44,7 +44,9 @@ limitations under the License.
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<b>한국어</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a>
<p>
</h4>
@@ -211,6 +213,11 @@ how to install them with conda on the Flax, PyTorch and TensorFlow installation pages

🤗 Transformers currently provides the following models (see [here](https://huggingface.co/docs/transformers/model_summary) for a summary of each of them):

1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
@@ -220,143 +227,206 @@ how to install them with conda on the Flax, PyTorch and TensorFlow installation pages

1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by the [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (from Google AI) released with the paper [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) by Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun.
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (Microsoft Research 에서) Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 의 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 논문과 함께 발표했습니다.
|
||||||
|
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (SHI Labs 에서) Ali Hassani and Humphrey Shi 의 [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) 논문과 함께 발표했습니다.
|
||||||
|
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (HuggingFace 에서) Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT 의 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 논문과 함께 발표했습니다.
|
||||||
|
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (Microsoft Research 에서) Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei 의 [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 논문과 함께 발표했습니다.
|
||||||
|
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (NAVER 에서) Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park 의 [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 논문과 함께 발표했습니다.
|
||||||
|
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (Facebook 에서) Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 의 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 논문과 함께 발표했습니다.
|
||||||
|
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (Intel Labs 에서) René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 의 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 논문과 함께 발표했습니다.
|
||||||
|
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
|
||||||
|
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
|
||||||
|
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (Google Research/Stanford University 에서) Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 의 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 논문과 함께 발표했습니다.
|
||||||
|
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (Meta AI 에서 제공)은 Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi.의 [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438)논문과 함께 발표했습니다.
|
||||||
|
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (Google Research 에서) Sascha Rothe, Shashi Narayan, Aliaksei Severyn 의 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 논문과 함께 발표했습니다.
|
||||||
|
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (Baidu 에서) Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 의 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) 논문과 함께 발표했습니다.
|
||||||
|
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (Baidu 에서 제공)은 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang.의 [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674)논문과 함께 발표했습니다.
|
||||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||||
|
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
|
||||||
|
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
|
||||||
|
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
|
||||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||||
|
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
|
||||||
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||||
|
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
|
||||||
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||||
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
|
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (EleutherAI 에서) Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbac 의 [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) 논문과 함께 발표했습니다.
|
||||||
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
|
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
|
||||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (OpenAI 에서) Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 의 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 논문과 함께 발표했습니다.
|
||||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||||
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
|
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (AI-Sweden 에서) Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren. 의 [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) 논문과 함께 발표했습니다.
|
||||||
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto (tanreinama).
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (from Facebook) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[OPT](https://huggingface.co/docs/transformers/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by Hui Su, Weiwei Shi, Xiaoyu Shen, Xiao Zhou, Tuo Ji, Jiarui Fang, Jie Zhou.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released in the repository [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine.
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research 에서) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 의 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 논문과 함께 발표했습니다.
|
||||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (Microsoft Research 에서) Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 의 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 논문과 함께 발표했습니다.
|
||||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (Peking University 에서 제공)은 Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.의 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221)논문과 함께 발표했습니다.
|
||||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (Tsinghua University and Nankai University 에서) Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 의 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 논문과 함께 발표했습니다.
|
||||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (Multimedia Computing Group, Nanjing University 에서) Zhan Tong, Yibing Song, Jue Wang, Limin Wang 의 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 논문과 함께 발표했습니다.
|
||||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (NAVER AI Lab/Kakao Enterprise/Kakao Brain 에서) Wonjae Kim, Bokyung Son, Ildoo Kim 의 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 논문과 함께 발표했습니다.
|
||||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (Google AI 에서) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 의 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 논문과 함께 발표했습니다.
|
||||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (UCLA NLP 에서) Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 의 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 논문과 함께 발표했습니다.
|
||||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (Google AI 에서) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 의 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 논문과 함께 발표했습니다.
|
||||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (Meta AI 에서) Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick 의 [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) 논문과 함께 발표했습니다.
|
||||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (Meta AI 에서) Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 의 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) 논문과 함께 발표했습니다.
|
||||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
|
||||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (Facebook AI 에서) Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli 의 [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) 논문과 함께 발표했습니다.
|
||||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (Facebook AI 에서) Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 의 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 논문과 함께 발표했습니다.
|
||||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (Facebook AI 에서) Qiantong Xu, Alexei Baevski, Michael Auli 의 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 논문과 함께 발표했습니다.
|
||||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (Microsoft Research 에서) Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei 의 [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) 논문과 함께 발표했습니다.
|
||||||
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (OpenAI 에서) Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 의 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 논문과 함께 발표했습니다.
|
||||||
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (Microsoft Research 에서) Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 의 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 논문과 함께 발표했습니다.
|
||||||
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (Meta AI 에서 제공)은 Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.의 [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255)논문과 함께 발표했습니다.
|
||||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (Facebook AI 에서 제공) Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li 의 [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) 논문과 함께 발표했습니다.
|
||||||
1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu.
|
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (Facebook 에서) Guillaume Lample and Alexis Conneau 의 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 논문과 함께 발표했습니다.
|
||||||
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
|
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (Microsoft Research 에서) Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 의 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 논문과 함께 발표했습니다.
|
||||||
|
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (Facebook AI 에서) Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov 의 [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) 논문과 함께 발표했습니다.
|
||||||
|
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (Facebook AI 에서) Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau 의 [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) 논문과 함께 발표했습니다.
|
||||||
|
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (Meta AI 에서) Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa 의 [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) 논문과 함께 발표했습니다.
|
||||||
|
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (Google/CMU 에서) Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le 의 [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) 논문과 함께 발표했습니다.
|
||||||
|
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (Facebook AI 에서) Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli 의 [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) 논문과 함께 발표했습니다.
|
||||||
|
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (Facebook AI 에서) Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli 의 [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) 논문과 함께 발표했습니다.
|
||||||
|
1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (Huazhong University of Science & Technology 에서) Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu 의 [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) 논문과 함께 발표했습니다.
|
||||||
|
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (the University of Wisconsin - Madison 에서) Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh 의 [You Only Sample (Almost) 논문과 함께 발표했습니다.
|
||||||
1. 새로운 모델을 올리고 싶나요? 우리가 **상세한 가이드와 템플릿** 으로 새로운 모델을 올리도록 도와드릴게요. 가이드와 템플릿은 이 저장소의 [`templates`](./templates) 폴더에서 확인하실 수 있습니다. [컨트리뷰션 가이드라인](./CONTRIBUTING.md)을 꼭 확인해주시고, PR을 올리기 전에 메인테이너에게 연락하거나 이슈를 오픈해 피드백을 받으시길 바랍니다.
|
1. 새로운 모델을 올리고 싶나요? 우리가 **상세한 가이드와 템플릿** 으로 새로운 모델을 올리도록 도와드릴게요. 가이드와 템플릿은 이 저장소의 [`templates`](./templates) 폴더에서 확인하실 수 있습니다. [컨트리뷰션 가이드라인](./CONTRIBUTING.md)을 꼭 확인해주시고, PR을 올리기 전에 메인테이너에게 연락하거나 이슈를 오픈해 피드백을 받으시길 바랍니다.
|
||||||
|
|
||||||
각 모델이 Flax, PyTorch, TensorFlow으로 구현되었는지 또는 🤗 Tokenizers 라이브러리가 지원하는 토크나이저를 사용하는지 확인하려면, [이 표](https://huggingface.co/docs/transformers/index#supported-frameworks)를 확인하세요.
|
각 모델이 Flax, PyTorch, TensorFlow으로 구현되었는지 또는 🤗 Tokenizers 라이브러리가 지원하는 토크나이저를 사용하는지 확인하려면, [이 표](https://huggingface.co/docs/transformers/index#supported-frameworks)를 확인하세요.
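
As a quick illustration of that multi-framework support, here is a minimal sketch. It assumes `transformers` is installed with both the PyTorch and TensorFlow backends, and uses `bert-base-uncased` purely as an example checkpoint:

```python
from transformers import AutoTokenizer, AutoModel, TFAutoModel

# The same checkpoint name resolves to framework-specific classes and weights.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # 🤗 Tokenizers-backed fast tokenizer
pt_model = AutoModel.from_pretrained("bert-base-uncased")       # PyTorch implementation
tf_model = TFAutoModel.from_pretrained("bert-base-uncased")     # TensorFlow implementation
```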

@@ -26,7 +26,7 @@ token: 词符 (annotate the original English term in parentheses)
tokenize: 词符化 (annotate the original English term in parentheses)
tokenizer: 词符化器 (annotate the original English term in parentheses)
transformer: transformer (do not translate)
pipeline: 流水线
API: API (do not translate)
inference: 推理
Trainer: 训练器 (do not translate when it appears as a class name)

@@ -69,7 +69,9 @@ checkpoint: 检查点
<b>简体中文</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a>
<p>
</h4>

@@ -81,11 +83,11 @@ checkpoint: 检查点
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>

🤗 Transformers provides thousands of pretrained models for tasks such as text classification, information extraction, question answering, summarization, translation and text generation in over 100 languages. Its mission is to make state-of-the-art NLP easy for everyone to use.
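
To make the tasks above concrete, here is a minimal sketch of the high-level `pipeline` API; the sentiment-analysis task is used purely as an example, and when no checkpoint is named a default one is downloaded and cached:

```python
from transformers import pipeline

# Downloads and caches a default pretrained model for the task, then runs it.
classifier = pipeline("sentiment-analysis")
print(classifier("We are very happy to include pipeline into the transformers repository."))
# [{'label': 'POSITIVE', 'score': 0.99...}]
```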

🤗 Transformers provides APIs to quickly download and use a pretrained model on a given text, fine-tune it on your own dataset, and then share it with the community on our [model hub](https://huggingface.co/models). At the same time, each Python module defining an architecture is fully standalone, making it easy to modify and to run quick research experiments.
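
A minimal sketch of that download / fine-tune / share loop; the checkpoint name and the Hub repository id below are placeholders, and pushing assumes you are already authenticated, e.g. via `huggingface-cli login`:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Download a pretrained checkpoint and the matching tokenizer.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

# ... fine-tune on your own dataset here (for example with the Trainer API) ...

# Share the fine-tuned result with the community on the model hub.
model.push_to_hub("my-username/my-finetuned-model")      # hypothetical repo id
tokenizer.push_to_hub("my-username/my-finetuned-model")  # hypothetical repo id
```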

🤗 Transformers is backed by the three most popular deep learning libraries, [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/), with seamless integration between them. You can train your model with one framework, then load it for inference with another.
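
A minimal sketch of that interoperability; the checkpoint name and the local directory `./my_model` are just examples, and `from_pt=True` asks the TensorFlow class to convert the PyTorch weights on the fly:

```python
from transformers import AutoModel, TFAutoModel

# Train or fine-tune with the PyTorch implementation, then save the weights...
pt_model = AutoModel.from_pretrained("bert-base-uncased")
pt_model.save_pretrained("./my_model")

# ...and load the very same weights into the TensorFlow implementation.
tf_model = TFAutoModel.from_pretrained("./my_model", from_pt=True)
```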

## Online demos

@@ -235,6 +237,11 @@ conda install -c huggingface transformers
🤗 Transformers currently supports the following architectures (see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each of them):

1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.

@@ -244,19 +251,29 @@ conda install -c huggingface transformers
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by the [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
@ -265,21 +282,33 @@ conda install -c huggingface transformers
|
|||||||
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (来自 Berkeley/Facebook/Google) 伴随论文 [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) 由 Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch 发布。
|
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (来自 Berkeley/Facebook/Google) 伴随论文 [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) 由 Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch 发布。
|
||||||
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (来自 SenseTime Research) 伴随论文 [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) 由 Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai 发布。
|
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (来自 SenseTime Research) 伴随论文 [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) 由 Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai 发布。
|
||||||
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (来自 Facebook) 伴随论文 [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) 由 Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou 发布。
|
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (来自 Facebook) 伴随论文 [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) 由 Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou 发布。
|
||||||
|
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (来自 Google AI) 伴随论文 [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) 由 Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun 发布。
|
||||||
|
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (来自 The University of Texas at Austin) 伴随论文 [NMS Strikes Back](https://arxiv.org/abs/2212.06137) 由 Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl 发布。
|
||||||
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (来自 Facebook) 伴随论文 [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) 由 Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko 发布。
|
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (来自 Facebook) 伴随论文 [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) 由 Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko 发布。
|
||||||
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (来自 Microsoft Research) 伴随论文 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 由 Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 发布。
|
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (来自 Microsoft Research) 伴随论文 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 由 Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 发布。
|
||||||
|
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (来自 SHI Labs) 伴随论文 [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) 由 Ali Hassani and Humphrey Shi 发布。
|
||||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (来自 HuggingFace), 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 同样的方法也应用于压缩 GPT-2 到 [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa 到 [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT 到 [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) 和德语版 DistilBERT。
|
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (来自 HuggingFace), 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 同样的方法也应用于压缩 GPT-2 到 [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa 到 [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT 到 [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) 和德语版 DistilBERT。
|
||||||
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (来自 Microsoft Research) 伴随论文 [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 由 Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei 发布。
|
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (来自 Microsoft Research) 伴随论文 [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 由 Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei 发布。
|
||||||
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (来自 NAVER) 伴随论文 [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 由 Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park 发布。
|
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (来自 NAVER) 伴随论文 [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 由 Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park 发布。
|
||||||
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (来自 Facebook) 伴随论文 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 由 Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 发布。
|
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (来自 Facebook) 伴随论文 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 由 Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 发布。
|
||||||
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。
|
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。
|
||||||
|
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (来自 Snap Research) 伴随论文 [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) 由 Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren 发布。
|
||||||
|
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
|
||||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。
|
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。
|
||||||
|
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (来自 Meta AI) 伴随论文 [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) 由 Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi 发布。
|
||||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
|
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
|
||||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。
|
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。
|
||||||
|
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (来自 Baidu) 伴随论文 [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) 由 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang 发布。
|
||||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||||
|
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
|
||||||
|
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
|
||||||
|
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
|
||||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, Kyo Hattori.
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by 坂本俊之(tanreinama).
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (from Facebook) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah's Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook) released with the paper [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by Hui Su, Weiwei Shi, Xiaoyu Shen, Xiao Zhou, Tuo Ji, Jiarui Fang, Jie Zhou.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology) released with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released in [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook) released with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook) released with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University) released with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever (a short usage sketch follows this list).
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (from Meta AI) released with the paper [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (from Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI) released with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
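Any checkpoint for the architectures above can be driven through the `pipeline` API. As referenced in the Whisper entry, here is a minimal speech-recognition sketch; the `openai/whisper-tiny` checkpoint and the local `sample.wav` path are illustrative placeholders, not the only supported choices:

```python
from transformers import pipeline

# Build an automatic-speech-recognition pipeline from a Whisper checkpoint.
# "openai/whisper-tiny" is only an example; any ASR checkpoint on the Hub works.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")

# Transcribe a local audio file; the pipeline decodes and resamples it for the model.
result = asr("sample.wav")
print(result["text"])
```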
@@ -392,7 +462,7 @@ conda install -c huggingface transformers
| Section | Description |
|-|-|
| [Documentation](https://huggingface.co/docs/transformers/) | Full API documentation and tutorials |
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using `Tokenizer` to prepare data for the models |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and with the `Trainer` API |
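The preprocessing and training rows above fit together: a tokenizer turns raw text into tensors that either a plain PyTorch/TensorFlow loop or the `Trainer` API can consume. A minimal sketch of the tokenizer half, using `bert-base-uncased` as a stand-in checkpoint and two made-up sentences:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Example checkpoint only; substitute whatever model you are fine-tuning.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

# The tokenizer pads/truncates and returns PyTorch tensors ready for the model
# (Trainer calls the model with the same kind of batch under the hood).
batch = tokenizer(["I love this!", "This is terrible."], padding=True, truncation=True, return_tensors="pt")
outputs = model(**batch)
print(outputs.logits.shape)  # torch.Size([2, 2])
```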
@@ -81,7 +81,9 @@ user: 使用者
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
<b>繁體中文</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a>
<p>
</h4>

@@ -247,6 +249,11 @@ conda install -c huggingface transformers
🤗 Transformers currently supports the following architectures (see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each of them). Any of them can be loaded by checkpoint name through the Auto classes, as in the minimal sketch right below:
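A loading sketch under stated assumptions: `bert-base-cased` stands in for any checkpoint from the list, and the Auto classes read the architecture from the checkpoint's config:

```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

# "bert-base-cased" is a placeholder; any checkpoint from the list below works.
checkpoint = "bert-base-cased"

config = AutoConfig.from_pretrained(checkpoint)    # architecture is read from the config
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint)

inputs = tokenizer("Hello world!", return_tensors="pt")
outputs = model(**inputs)
print(config.model_type, outputs.last_hidden_state.shape)
```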
1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
1. **[ALIGN](https://huggingface.co/docs/transformers/model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
1. **[AltCLIP](https://huggingface.co/docs/transformers/model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
1. **[Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
1. **[Autoformer](https://huggingface.co/docs/transformers/model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
1. **[Bark](https://huggingface.co/docs/transformers/model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
@@ -256,19 +263,29 @@ conda install -c huggingface transformers
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
1. **[BiT](https://huggingface.co/docs/transformers/model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BLIP](https://huggingface.co/docs/transformers/model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](https://huggingface.co/docs/transformers/model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](https://huggingface.co/docs/transformers/model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](https://huggingface.co/docs/transformers/model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](https://huggingface.co/docs/transformers/model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](https://huggingface.co/docs/transformers/model_doc/cpmant)** (from OpenBMB) released by the [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
@@ -277,21 +294,33 @@ conda install -c huggingface transformers
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
||||||
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
|
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
|
||||||
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||||
|
1. **[DePlot](https://huggingface.co/docs/transformers/model_doc/deplot)** (from Google AI) released with the paper [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) by Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun.
|
||||||
|
1. **[DETA](https://huggingface.co/docs/transformers/model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
|
||||||
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||||
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||||
|
1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi.
|
||||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.
|
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.
|
||||||
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||||
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
|
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
|
||||||
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||||
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||||
|
1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
|
||||||
|
1. **[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
|
||||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||||
|
1. **[EnCodec](https://huggingface.co/docs/transformers/model_doc/encodec)** (from Meta AI) released with the paper [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) by Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi.
|
||||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
||||||
|
1. **[ErnieM](https://huggingface.co/docs/transformers/model_doc/ernie_m)** (from Baidu) released with the paper [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) by Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang.
|
||||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||||
|
1. **[Falcon](https://huggingface.co/docs/transformers/model_doc/falcon)** (from Technology Innovation Institute) by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
|
||||||
|
1. **[FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
|
||||||
|
1. **[FLAN-UL2](https://huggingface.co/docs/transformers/model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
|
||||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||||
|
1. **[FocalNet](https://huggingface.co/docs/transformers/model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
|
||||||
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||||
|
1. **[GIT](https://huggingface.co/docs/transformers/model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
|
||||||
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
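   A minimal usage sketch (assuming the public `gpt2` checkpoint on the Hub; sampled text will vary):

   ```python
   from transformers import pipeline

   # Wrap the GPT-2 checkpoint in a text-generation pipeline.
   generator = pipeline("text-generation", model="gpt2")
   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])
   ```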
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
1. **[GPT-Sw3](https://huggingface.co/docs/transformers/model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[GPTSAN-japanese](https://huggingface.co/docs/transformers/model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto (tanreinama).
1. **[Graphormer](https://huggingface.co/docs/transformers/model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
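   A minimal speech-recognition sketch with a CTC-finetuned checkpoint (`facebook/hubert-large-ls960-ft` is assumed to be available; `sample.wav` is a placeholder path to a 16 kHz mono recording):

   ```python
   from transformers import pipeline

   # CTC decoding of a speech clip into text.
   asr = pipeline("automatic-speech-recognition", model="facebook/hubert-large-ls960-ft")
   print(asr("sample.wav")["text"])
   ```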
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](https://huggingface.co/docs/transformers/model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](https://huggingface.co/docs/transformers/model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[Jukebox](https://huggingface.co/docs/transformers/model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
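   A minimal generation sketch for the LLaMA family (the `meta-llama/Llama-2-7b-hf` checkpoint is gated and must be requested on the Hub before use):

   ```python
   from transformers import AutoModelForCausalLM, AutoTokenizer

   # Load a gated Llama 2 checkpoint; any LLaMA-style causal LM works the same way.
   tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
   model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
   inputs = tokenizer("The capital of France is", return_tensors="pt")
   outputs = model.generate(**inputs, max_new_tokens=10)
   print(tokenizer.decode(outputs[0], skip_special_tokens=True))
   ```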
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
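   A minimal translation sketch (assuming the public `Helsinki-NLP/opus-mt-en-de` OPUS-MT checkpoint):

   ```python
   from transformers import pipeline

   # English-to-German translation with a MarianMT checkpoint.
   translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-de")
   print(translator("Machine translation is fun.")[0]["translation_text"])
   ```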
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MatCha](https://huggingface.co/docs/transformers/model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MEGA](https://huggingface.co/docs/transformers/model_doc/mega)** (from Facebook) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileNetV1](https://huggingface.co/docs/transformers/model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](https://huggingface.co/docs/transformers/model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MobileViTV2](https://huggingface.co/docs/transformers/model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[NLLB-MOE](https://huggingface.co/docs/transformers/model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OneFormer](https://huggingface.co/docs/transformers/model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[OpenLlama](https://huggingface.co/docs/transformers/model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[OPT](https://huggingface.co/docs/transformers/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
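   A minimal masked-language-modeling sketch (assuming the public `roberta-base` checkpoint; RoBERTa uses `<mask>` as its mask token):

   ```python
   from transformers import pipeline

   # Rank candidate fillers for the masked position.
   unmasker = pipeline("fill-mask", model="roberta-base")
   for candidate in unmasker("The goal of life is <mask>."):
       print(candidate["token_str"], round(candidate["score"], 3))
   ```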
1. **[RoBERTa-PreLayerNorm](https://huggingface.co/docs/transformers/model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](https://huggingface.co/docs/transformers/model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by Hui Su, Weiwei Shi, Xiaoyu Shen, Xiao Zhou, Tuo Ji, Jiarui Fang, Jie Zhou.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released in the repository [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
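   A minimal semantic-segmentation sketch (assuming the public `nvidia/segformer-b0-finetuned-ade-512-512` checkpoint and a reachable test image URL):

   ```python
   from transformers import pipeline

   # Predict ADE20K segment labels for an image.
   segmenter = pipeline("image-segmentation", model="nvidia/segformer-b0-finetuned-ade-512-512")
   for segment in segmenter("http://images.cocodataset.org/val2017/000000039769.jpg"):
       print(segment["label"])
   ```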
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](https://huggingface.co/docs/transformers/model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook) released with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University) released with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](https://huggingface.co/docs/transformers/model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](https://huggingface.co/docs/transformers/model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](https://huggingface.co/docs/transformers/model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
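   A minimal text-to-text sketch (assuming the public `t5-small` checkpoint; the task prefix selects the behavior):

   ```python
   from transformers import pipeline

   # T5 casts every task, here translation, as text-to-text generation.
   t2t = pipeline("text2text-generation", model="t5-small")
   print(t2t("translate English to German: The house is wonderful.")[0]["generated_text"])
   ```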
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](https://huggingface.co/docs/transformers/model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
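   A minimal table question answering sketch (assuming the public `google/tapas-base-finetuned-wtq` checkpoint; TAPAS expects every table cell as a string):

   ```python
   from transformers import pipeline

   # Answer a natural-language question over a small in-memory table.
   tqa = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")
   table = {"Model": ["TAPAS", "TAPEX"], "Year": ["2020", "2021"]}
   print(tqa(table=table, query="Which model was released in 2020?")["answer"])
   ```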
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](https://huggingface.co/docs/transformers/model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||||
|
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
|
||||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
||||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||||
|
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||||
|
1. **[ViViT](https://huggingface.co/docs/transformers/model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
|
||||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
||||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
||||||
|
1. **[X-MOD](https://huggingface.co/docs/transformers/model_doc/xmod)** (from Meta AI) released with the paper [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.
|
||||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||||
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
||||||
|
1. **[XLM-V](https://huggingface.co/docs/transformers/model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
|
||||||
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||||
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
||||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||||

# Awesome projects built with Transformers

This page lists awesome projects built on top of Transformers. Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone else to build their dream projects.

In this list, we showcase incredibly impactful and novel projects that have pushed the field forward. We celebrate 100 of these projects as we reach the milestone of 100k stars as a community, but we're very open to pull requests adding other projects to the list. If you believe a project should be here and it's not, please open a PR to add it.

## [gpt4all](https://github.com/nomic-ai/gpt4all)

[gpt4all](https://github.com/nomic-ai/gpt4all) is an ecosystem of open-source chatbots trained on massive collections of clean assistant data, including code, stories, and dialogue. It offers open-source large language models such as LLaMA and GPT-J trained in an assistant style.

Keywords: Open-source, LLaMa, GPT-J, instruction, assistant

## [recommenders](https://github.com/microsoft/recommenders)

This repository contains examples and best practices for building recommendation systems, provided as Jupyter notebooks. It goes over several aspects required to build efficient recommendation systems: data preparation, modeling, evaluation, model selection & optimization, as well as operationalization.

Keywords: Recommender systems, AzureML

## [lama-cleaner](https://github.com/Sanster/lama-cleaner)

Image inpainting tool powered by Stable Diffusion. Remove any unwanted object, defect, or person from your pictures, or erase and replace anything in them.

Keywords: inpainting, SD, Stable Diffusion

## [flair](https://github.com/flairNLP/flair)

FLAIR is a powerful PyTorch NLP framework covering several important tasks: NER, sentiment analysis, part-of-speech tagging, text and document embeddings, among other things.

Keywords: NLP, text embedding, document embedding, biomedical, NER, PoS, sentiment-analysis
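
As a quick illustration, here is a minimal sketch of flair's tagging API (the pretrained `"ner"` model is downloaded on first use):

```python
from flair.data import Sentence
from flair.models import SequenceTagger

# load a pretrained named-entity-recognition tagger
tagger = SequenceTagger.load("ner")

# wrap raw text in a Sentence and predict entity spans in place
sentence = Sentence("George Washington went to Washington.")
tagger.predict(sentence)

for entity in sentence.get_spans("ner"):
    print(entity)
```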

## [mindsdb](https://github.com/mindsdb/mindsdb)

MindsDB is a low-code ML platform that automates and integrates several ML frameworks into the data stack as "AI Tables" to streamline the integration of AI into applications, making it accessible to developers of all skill levels.

Keywords: Database, low-code, AI table

## [langchain](https://github.com/hwchase17/langchain)

[langchain](https://github.com/hwchase17/langchain) is aimed at assisting in the development of apps merging both LLMs and other sources of knowledge. The library allows chaining calls to applications, creating a sequence across many tools.

Keywords: LLMs, Large Language Models, Agents, Chains
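
A rough sketch of the chaining idea; LangChain's API evolves quickly, so treat the imports and the `OpenAI` wrapper below as assumptions tied to early versions, with an `OPENAI_API_KEY` set in the environment:

```python
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# a prompt template with one input variable
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)

# chain the prompt and the LLM into a single callable step
chain = LLMChain(llm=OpenAI(temperature=0.7), prompt=prompt)
print(chain.run("colorful socks"))
```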

## [LlamaIndex](https://github.com/jerryjliu/llama_index)

[LlamaIndex](https://github.com/jerryjliu/llama_index) is a project that provides a central interface to connect your LLMs with external data. It provides various kinds of indices and retrieval mechanisms to perform different LLM tasks and obtain knowledge-augmented results.

Keywords: LLMs, Large Language Models, Data Retrieval, Indices, Knowledge Augmentation

## [ParlAI](https://github.com/facebookresearch/ParlAI)

[ParlAI](https://github.com/facebookresearch/ParlAI) is a Python framework for sharing, training and testing dialogue models, from open-domain chitchat, to task-oriented dialogue, to visual question answering. It provides more than 100 datasets under the same API, a large zoo of pretrained models, a set of agents, and has several integrations.

Keywords: Dialogue, Chatbots, VQA, Datasets, Agents

## [sentence-transformers](https://github.com/UKPLab/sentence-transformers)

This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various tasks. Text is embedded in vector space such that similar text is close and can efficiently be found using cosine similarity.

Keywords: Dense vector representations, Text embeddings, Sentence embeddings
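
A minimal sketch of the embedding API (the `all-MiniLM-L6-v2` name below is one of the library's published checkpoints; any other works the same way):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")

sentences = ["A man is eating food.", "A man is eating a piece of bread."]
embeddings = model.encode(sentences)

# cosine similarity between the two sentence vectors
print(util.cos_sim(embeddings[0], embeddings[1]))
```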

## [ludwig](https://github.com/ludwig-ai/ludwig)

Ludwig is a declarative machine learning framework that makes it easy to define machine learning pipelines using a simple and flexible data-driven configuration system. Ludwig is targeted at a wide variety of AI tasks. It provides a data-driven configuration system, training, prediction, and evaluation scripts, as well as a programmatic API.

Keywords: Declarative, Data-driven, ML Framework

## [InvokeAI](https://github.com/invoke-ai/InvokeAI)

[InvokeAI](https://github.com/invoke-ai/InvokeAI) is an engine for Stable Diffusion models, aimed at professionals, artists, and enthusiasts. It leverages the latest AI-driven technologies through a CLI as well as a WebUI.

Keywords: Stable-Diffusion, WebUI, CLI

## [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP)

[PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) is an easy-to-use and powerful NLP library particularly targeted at the Chinese language. It has support for multiple pre-trained model zoos, and supports a wide range of NLP tasks from research to industrial applications.

Keywords: NLP, Chinese, Research, Industry

## [stanza](https://github.com/stanfordnlp/stanza)

The Stanford NLP Group's official Python NLP library. It contains support for running various accurate natural language processing tools on 60+ languages and for accessing the Java Stanford CoreNLP software from Python.

Keywords: NLP, Multilingual, CoreNLP
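
A minimal pipeline sketch (the English models are downloaded on first use):

```python
import stanza

# downloads the English models the first time it runs
stanza.download("en")

nlp = stanza.Pipeline("en", processors="tokenize,pos,ner")
doc = nlp("Barack Obama was born in Hawaii.")

for ent in doc.ents:
    print(ent.text, ent.type)
```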

## [DeepPavlov](https://github.com/deeppavlov/DeepPavlov)

[DeepPavlov](https://github.com/deeppavlov/DeepPavlov) is an open-source conversational AI library. It is designed for the development of production-ready chatbots and complex conversational systems, as well as research in the area of NLP and, particularly, of dialog systems.

Keywords: Conversational, Chatbot, Dialog

## [alpaca-lora](https://github.com/tloen/alpaca-lora)

Alpaca-lora contains code for reproducing the Stanford Alpaca results using low-rank adaptation (LoRA). The repository provides training (fine-tuning) as well as generation scripts.

Keywords: LoRA, Parameter-efficient fine-tuning

## [imagen-pytorch](https://github.com/lucidrains/imagen-pytorch)

An open-source implementation of Imagen, Google's closed-source text-to-image neural network that beats DALL-E 2. As of release, it is the new SOTA for text-to-image synthesis.

Keywords: Imagen, Text-to-image

## [adapter-transformers](https://github.com/adapter-hub/adapter-transformers)

[adapter-transformers](https://github.com/adapter-hub/adapter-transformers) is an extension of HuggingFace's Transformers library, integrating adapters into state-of-the-art language models by incorporating AdapterHub, a central repository for pre-trained adapter modules. It is a drop-in replacement for transformers, which is regularly updated to stay up-to-date with the developments of transformers.

Keywords: Adapters, LoRA, Parameter-efficient fine-tuning, Hub

## [NeMo](https://github.com/NVIDIA/NeMo)

NVIDIA [NeMo](https://github.com/NVIDIA/NeMo) is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), text-to-speech synthesis (TTS), large language models (LLMs), and natural language processing (NLP). The primary objective of [NeMo](https://github.com/NVIDIA/NeMo) is to help researchers from industry and academia reuse prior work (code and pretrained models) and make it easier to create new [conversational AI models](https://developer.nvidia.com/conversational-ai#started).

Keywords: Conversational, ASR, TTS, LLMs, NLP

## [Runhouse](https://github.com/run-house/runhouse)

[Runhouse](https://github.com/run-house/runhouse) allows you to send code and data to any of your compute or data infra, all in Python, and continue to interact with them normally from your existing code and environment. Runhouse developers mention:

> Think of it as an expansion pack to your Python interpreter that lets it take detours to remote machines or manipulate remote data.

Keywords: MLOps, Infrastructure, Data storage, Modeling

## [MONAI](https://github.com/Project-MONAI/MONAI)

[MONAI](https://github.com/Project-MONAI/MONAI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of the PyTorch Ecosystem. Its ambitions are:
- developing a community of academic, industrial and clinical researchers collaborating on a common foundation;
- creating state-of-the-art, end-to-end training workflows for healthcare imaging;
- providing researchers with an optimized and standardized way to create and evaluate deep learning models.

Keywords: Healthcare imaging, Training, Evaluation

## [simpletransformers](https://github.com/ThilinaRajapakse/simpletransformers)

Simple Transformers lets you quickly train and evaluate Transformer models. Only 3 lines of code are needed to initialize, train, and evaluate a model. It supports a wide variety of NLP tasks.

Keywords: Framework, simplicity, NLP
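
As a sketch of that three-line workflow, assuming a pandas DataFrame with `text` and `labels` columns (the tiny toy data here is illustrative only):

```python
import pandas as pd
from simpletransformers.classification import ClassificationModel

train_df = pd.DataFrame(
    [["best movie ever", 1], ["utterly boring", 0]],
    columns=["text", "labels"],
)

# initialize, train, and evaluate a binary classifier
model = ClassificationModel("roberta", "roberta-base", use_cuda=False)
model.train_model(train_df)
result, model_outputs, wrong_predictions = model.eval_model(train_df)
```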

## [JARVIS](https://github.com/microsoft/JARVIS)

[JARVIS](https://github.com/microsoft/JARVIS) is a system attempting to merge LLMs such as GPT-4 with the rest of the open-source ML community: leveraging up to 60 downstream models in order to perform tasks identified by the LLM.

Keywords: LLM, Agents, HF Hub

## [transformers.js](https://xenova.github.io/transformers.js/)

[transformers.js](https://xenova.github.io/transformers.js/) is a JavaScript library targeted at running models from transformers directly within the browser.

Keywords: Transformers, JavaScript, browser

## [bumblebee](https://github.com/elixir-nx/bumblebee)

Bumblebee provides pre-trained neural network models on top of Axon, a neural networks library for the Elixir language. It includes integration with 🤗 Models, allowing anyone to download and perform Machine Learning tasks with a few lines of code.

Keywords: Elixir, Axon

## [argilla](https://github.com/argilla-io/argilla)

Argilla is an open-source platform providing advanced NLP labeling, monitoring, and workspaces. It is compatible with many open source ecosystems such as Hugging Face, Stanza, FLAIR, and others.

Keywords: NLP, Labeling, Monitoring, Workspaces

## [haystack](https://github.com/deepset-ai/haystack)

Haystack is an open source NLP framework to interact with your data using Transformer models and LLMs. It offers production-ready tools to quickly build complex decision making, question answering, semantic search, text generation applications, and more.

Keywords: NLP, Framework, LLM

## [spaCy](https://github.com/explosion/spaCy)

[spaCy](https://github.com/explosion/spaCy) is a library for advanced Natural Language Processing in Python and Cython. It's built on the very latest research, and was designed from day one to be used in real products. It offers support for transformers models through its third-party package, spacy-transformers.

Keywords: NLP, Framework
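
A minimal sketch using the transformer-backed English pipeline (assumes `spacy-transformers` is installed and the `en_core_web_trf` model has been downloaded):

```python
import spacy

# the transformer-backed pipeline; install it first with:
#   python -m spacy download en_core_web_trf
nlp = spacy.load("en_core_web_trf")

doc = nlp("Apple is looking at buying U.K. startup for $1 billion.")
for ent in doc.ents:
    print(ent.text, ent.label_)
```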

## [speechbrain](https://github.com/speechbrain/speechbrain)

SpeechBrain is an open-source and all-in-one conversational AI toolkit based on PyTorch. The goal is to create a single, flexible, and user-friendly toolkit that can be used to easily develop state-of-the-art speech technologies, including systems for speech recognition, speaker recognition, speech enhancement, speech separation, language identification, multi-microphone signal processing, and many others.

Keywords: Conversational, Speech

## [skorch](https://github.com/skorch-dev/skorch)

Skorch is a scikit-learn compatible neural network library that wraps PyTorch. It has support for models from transformers and for tokenizers from tokenizers.

Keywords: Scikit-Learn, PyTorch
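
A minimal sketch of the scikit-learn-style wrapper around a PyTorch module (random toy data, illustrative hyperparameters):

```python
import numpy as np
import torch
from torch import nn
from skorch import NeuralNetClassifier

class MyModule(nn.Module):
    def __init__(self, num_units=10):
        super().__init__()
        self.dense = nn.Linear(20, num_units)
        self.output = nn.Linear(num_units, 2)

    def forward(self, X):
        # return raw logits; CrossEntropyLoss applies the softmax
        return self.output(torch.relu(self.dense(X)))

X = np.random.randn(100, 20).astype(np.float32)
y = np.random.randint(0, 2, size=100).astype(np.int64)

# the wrapper exposes the familiar scikit-learn fit/predict API
net = NeuralNetClassifier(MyModule, criterion=nn.CrossEntropyLoss, max_epochs=5, lr=0.1)
net.fit(X, y)
print(net.predict(X[:5]))
```

Because `net` behaves like any scikit-learn estimator, it composes with `Pipeline`, `GridSearchCV`, and the rest of that ecosystem.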

## [bertviz](https://github.com/jessevig/bertviz)

BertViz is an interactive tool for visualizing attention in Transformer language models such as BERT, GPT2, or T5. It can be run inside a Jupyter or Colab notebook through a simple Python API that supports most Hugging Face models.

Keywords: Visualization, Transformers
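
A minimal sketch of the attention visualization, meant to be run inside a Jupyter or Colab notebook:

```python
from transformers import AutoTokenizer, AutoModel
from bertviz import head_view

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)

inputs = tokenizer.encode("The cat sat on the mat", return_tensors="pt")
outputs = model(inputs)

# render the interactive attention-head view in the notebook
tokens = tokenizer.convert_ids_to_tokens(inputs[0])
head_view(outputs.attentions, tokens)
```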

## [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax)

[mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax) is a Haiku library using the xmap/pjit operators in JAX for model parallelism of transformers. This library is designed for scalability up to approximately 40B parameters on TPUv3s. It was the library used to train the GPT-J model.

Keywords: Haiku, Model parallelism, LLM, TPU

## [deepchem](https://github.com/deepchem/deepchem)

DeepChem aims to provide a high quality open-source toolchain that democratizes the use of deep-learning in drug discovery, materials science, quantum chemistry, and biology.

Keywords: Drug discovery, Materials Science, Quantum Chemistry, Biology

## [OpenNRE](https://github.com/thunlp/OpenNRE)

An Open-Source Package for Neural Relation Extraction (NRE). It is targeted at a wide range of users, from newcomers to relation extraction, to developers, researchers, or students.

Keywords: Neural Relation Extraction, Framework

## [pycorrector](https://github.com/shibing624/pycorrector)

PyCorrector is a Chinese text error correction tool. It uses a language model to detect errors, and pinyin and shape features to correct them. It can be used with Chinese pinyin and stroke input methods.

Keywords: Chinese, Error correction tool, Language model, Pinyin

## [nlpaug](https://github.com/makcedward/nlpaug)

This Python library helps you with augmenting NLP data for machine learning projects. It is a lightweight library featuring synthetic data generation for improving model performance, support for audio and text, and compatibility with several ecosystems (scikit-learn, pytorch, tensorflow).

Keywords: Data augmentation, Synthetic data generation, Audio, NLP
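
A minimal augmentation sketch using contextual word embeddings:

```python
import nlpaug.augmenter.word as naw

text = "The quick brown fox jumps over the lazy dog"

# substitute words using contextual BERT embeddings
aug = naw.ContextualWordEmbsAug(
    model_path="bert-base-uncased", action="substitute"
)
print(aug.augment(text))
```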

## [dream-textures](https://github.com/carson-katri/dream-textures)

[dream-textures](https://github.com/carson-katri/dream-textures) is a library targeted at bringing stable-diffusion support within Blender. It supports several use-cases, such as image generation, texture projection, inpainting/outpainting, ControlNet, and upscaling.

Keywords: Stable-Diffusion, Blender

## [seldon-core](https://github.com/SeldonIO/seldon-core)

Seldon core converts your ML models (Tensorflow, Pytorch, H2O, etc.) or language wrappers (Python, Java, etc.) into production REST/gRPC microservices. Seldon handles scaling to thousands of production machine learning models and provides advanced machine learning capabilities out of the box including advanced metrics, request logging, explainers, outlier detectors, A/B tests, canaries and more.

Keywords: Microservices, Modeling, Language wrappers

## [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo)

This repository includes optimized deep learning models and a set of demos to expedite development of high-performance deep learning inference applications. Use these free pre-trained models instead of training your own models to speed up the development and production deployment process.

Keywords: Optimized models, Demos

## [ml-stable-diffusion](https://github.com/apple/ml-stable-diffusion)

ML-Stable-Diffusion is a repository by Apple bringing Stable Diffusion support to Core ML on Apple Silicon devices. It supports stable diffusion checkpoints hosted on the Hugging Face Hub.

Keywords: Stable Diffusion, Apple Silicon, Core ML

## [stable-dreamfusion](https://github.com/ashawkey/stable-dreamfusion)

Stable-Dreamfusion is a PyTorch implementation of the text-to-3D model Dreamfusion, powered by the Stable Diffusion text-to-2D model.

Keywords: Text-to-3D, Stable Diffusion

## [txtai](https://github.com/neuml/txtai)

[txtai](https://github.com/neuml/txtai) is an open-source platform for semantic search and workflows powered by language models. txtai builds embeddings databases, which are a union of vector indexes and relational databases enabling similarity search with SQL. Semantic workflows connect language models together into unified applications.

Keywords: Semantic search, LLM
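
A minimal index-and-search sketch (the embedding model below is an assumption; any sentence-transformers checkpoint works):

```python
from txtai.embeddings import Embeddings

data = [
    "US tops 5 million confirmed virus cases",
    "Beijing mobilises invasion craft along coast",
    "Maine man wins $1M from $25 lottery ticket",
]

# build a semantic index over the documents
embeddings = Embeddings({"path": "sentence-transformers/all-MiniLM-L6-v2"})
embeddings.index([(i, text, None) for i, text in enumerate(data)])

# returns a list of (id, score) tuples
print(embeddings.search("feel good story", 1))
```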

## [djl](https://github.com/deepjavalibrary/djl)

Deep Java Library (DJL) is an open-source, high-level, engine-agnostic Java framework for deep learning. DJL is designed to be easy to get started with and simple to use for developers. DJL provides a native Java development experience and functions like any other regular Java library. DJL offers [a Java binding](https://github.com/deepjavalibrary/djl/tree/master/extensions/tokenizers) for HuggingFace Tokenizers and an easy conversion toolkit for deploying HuggingFace models in Java.

Keywords: Java, Framework

## [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/)

This project provides a unified framework to test generative language models on a large number of different evaluation tasks. It has support for more than 200 tasks, and supports different ecosystems: HF Transformers, GPT-NeoX, DeepSpeed, as well as the OpenAI API.

Keywords: LLM, Evaluation, Few-shot

## [gpt-neox](https://github.com/EleutherAI/gpt-neox)

This repository records EleutherAI's library for training large-scale language models on GPUs. The framework is based on NVIDIA's Megatron Language Model and has been augmented with techniques from DeepSpeed as well as some novel optimizations. It is focused on training multi-billion-parameter models.

Keywords: Training, LLM, Megatron, DeepSpeed

## [muzic](https://github.com/microsoft/muzic)

Muzic is a research project on AI music that empowers music understanding and generation with deep learning and artificial intelligence. Muzic was created by researchers from Microsoft Research Asia.

Keywords: Music understanding, Music generation

## [dalle-flow](https://github.com/jina-ai/dalle-flow)

DALL·E Flow is an interactive workflow for generating high-definition images from a text prompt. It leverages DALL·E-Mega, GLID-3 XL, and Stable Diffusion to generate image candidates, and then calls CLIP-as-service to rank the candidates w.r.t. the prompt. The preferred candidate is fed to GLID-3 XL for diffusion, which often enriches the texture and background. Finally, the candidate is upscaled to 1024x1024 via SwinIR.

Keywords: High-definition image generation, Stable Diffusion, DALL-E Mega, GLID-3 XL, CLIP, SwinIR

## [lightseq](https://github.com/bytedance/lightseq)

LightSeq is a high performance training and inference library for sequence processing and generation implemented in CUDA. It enables highly efficient computation of modern NLP and CV models such as BERT, GPT, Transformer, etc. It is therefore well suited to machine translation, text generation, image classification, and other sequence-related tasks.

Keywords: Training, Inference, Sequence Processing, Sequence Generation

## [LaTeX-OCR](https://github.com/lukas-blecher/LaTeX-OCR)

The goal of this project is to create a learning-based system that takes an image of a math formula and returns the corresponding LaTeX code.

Keywords: OCR, LaTeX, Math formula

## [open_clip](https://github.com/mlfoundations/open_clip)

OpenCLIP is an open source implementation of OpenAI's CLIP.

The goal of this repository is to enable training models with contrastive image-text supervision, and to investigate their properties such as robustness to distribution shift. The starting point is an implementation of CLIP that matches the accuracy of the original CLIP models when trained on the same dataset.

Specifically, a ResNet-50 model trained with this codebase on OpenAI's 15 million image subset of YFCC achieves 32.7% top-1 accuracy on ImageNet.

Keywords: CLIP, Open-source, Contrastive, Image-text

## [dalle-playground](https://github.com/saharmor/dalle-playground)

A playground to generate images from any text prompt using Stable Diffusion and Dall-E mini.

Keywords: WebUI, Stable Diffusion, Dall-E mini

## [FedML](https://github.com/FedML-AI/FedML)

[FedML](https://github.com/FedML-AI/FedML) is a federated learning and analytics library enabling secure and collaborative machine learning on decentralized data anywhere at any scale. It supports large-scale cross-silo federated learning, cross-device federated learning on smartphones/IoT devices, and research simulation.

Keywords: Federated Learning, Analytics, Collaborative ML, Decentralized

## [gpt-code-clippy](https://github.com/CodedotAl/gpt-code-clippy)

GPT-Code-Clippy (GPT-CC) is an open source version of GitHub Copilot, a language model based on GPT-3, called GPT-Codex, that is fine-tuned on publicly available code from GitHub.

Keywords: LLM, Code

## [TextAttack](https://github.com/QData/TextAttack)

[TextAttack](https://github.com/QData/TextAttack) 🐙 is a Python framework for adversarial attacks, data augmentation, and model training in NLP.

Keywords: Adversarial attacks, Data augmentation, NLP
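
A sketch of running a published attack recipe against a Hub model; the checkpoint name, recipe, and dataset below are illustrative assumptions:

```python
import transformers
import textattack

# a sentiment model fine-tuned on IMDb, published under the textattack org
model = transformers.AutoModelForSequenceClassification.from_pretrained(
    "textattack/bert-base-uncased-imdb"
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "textattack/bert-base-uncased-imdb"
)
model_wrapper = textattack.models.wrappers.HuggingFaceModelWrapper(model, tokenizer)

# build a published attack recipe and run it over a few dataset examples
attack = textattack.attack_recipes.TextFoolerJin2019.build(model_wrapper)
dataset = textattack.datasets.HuggingFaceDataset("imdb", split="test")
attacker = textattack.Attacker(attack, dataset, textattack.AttackArgs(num_examples=5))
attacker.attack_dataset()
```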

## [OpenPrompt](https://github.com/thunlp/OpenPrompt)

Prompt-learning is a paradigm to adapt pre-trained language models (PLMs) to downstream NLP tasks, which modifies the input text with a textual template and directly uses PLMs to conduct pre-training tasks. This library provides a standard, flexible and extensible framework to deploy the prompt-learning pipeline. [OpenPrompt](https://github.com/thunlp/OpenPrompt) supports loading PLMs directly from https://github.com/huggingface/transformers.

Keywords: Prompt-learning, PLMs

## [text-generation-webui](https://github.com/oobabooga/text-generation-webui/)

[text-generation-webui](https://github.com/oobabooga/text-generation-webui/) is a Gradio Web UI for running Large Language Models like LLaMA, llama.cpp, GPT-J, Pythia, OPT, and GALACTICA.

Keywords: LLM, WebUI

## [libra](https://github.com/Palashio/libra)

An ergonomic machine learning [libra](https://github.com/Palashio/libra)ry for non-technical users. It focuses on ergonomics and on ensuring that training a model is as simple as it can be.

Keywords: Ergonomic, Non-technical

## [alibi](https://github.com/SeldonIO/alibi)

Alibi is an open source Python library aimed at machine learning model inspection and interpretation. The focus of the library is to provide high-quality implementations of black-box, white-box, local and global explanation methods for classification and regression models.

Keywords: Model inspection, Model interpretation, Black-box, White-box

## [tortoise-tts](https://github.com/neonbjb/tortoise-tts)

Tortoise is a text-to-speech program built with the following priorities: strong multi-voice capabilities, and highly realistic prosody and intonation.

Keywords: Text-to-speech

## [flower](https://github.com/adap/flower)

Flower (flwr) is a framework for building federated learning systems. The design of Flower is based on a few guiding principles: customizability, extendability, framework agnosticity, and ease-of-use.

Keywords: Federated learning systems, Customizable, Extendable, Framework-agnostic, Simplicity

## [fast-bert](https://github.com/utterworks/fast-bert)

Fast-Bert is a deep learning library that allows developers and data scientists to train and deploy BERT and XLNet based models for natural language processing tasks, beginning with text classification. It is aimed at simplicity.

Keywords: Deployment, BERT, XLNet

## [towhee](https://github.com/towhee-io/towhee)

Towhee makes it easy to build neural data processing pipelines for AI applications. We provide hundreds of models, algorithms, and transformations that can be used as standard pipeline building blocks. Users can use Towhee's Pythonic API to build a prototype of their pipeline and automatically optimize it for production-ready environments.

Keywords: Data processing pipeline, Optimization

## [alibi-detect](https://github.com/SeldonIO/alibi-detect)

Alibi Detect is an open source Python library focused on outlier, adversarial and drift detection. The package aims to cover both online and offline detectors for tabular data, text, images and time series. Both TensorFlow and PyTorch backends are supported for drift detection.

Keywords: Adversarial, Outlier, Drift detection

## [FARM](https://github.com/deepset-ai/FARM)

[FARM](https://github.com/deepset-ai/FARM) makes Transfer Learning with BERT & Co simple, fast and enterprise-ready. It's built upon transformers and provides additional features to simplify the life of developers: parallelized preprocessing, highly modular design, multi-task learning, experiment tracking, easy debugging and close integration with AWS SageMaker.

Keywords: Transfer Learning, Modular design, Multi-task learning, Experiment tracking

## [aitextgen](https://github.com/minimaxir/aitextgen)

A robust Python tool for text-based AI training and generation using OpenAI's GPT-2 and EleutherAI's GPT Neo/GPT-3 architecture. [aitextgen](https://github.com/minimaxir/aitextgen) is a Python package that leverages PyTorch, Hugging Face Transformers and pytorch-lightning with specific optimizations for text generation using GPT-2, plus many added features.

Keywords: Training, Generation
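
A minimal generation sketch (a small GPT-2 model is downloaded on first use):

```python
from aitextgen import aitextgen

# loads a small GPT-2 model, downloading it the first time
ai = aitextgen()

ai.generate(n=1, prompt="Deep learning is", max_length=50)
```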

## [diffgram](https://github.com/diffgram/diffgram)

Diffgram aims to integrate human supervision into platforms. We support your team programmatically changing the UI (schema, layout, etc.) like in Streamlit. This means that you can collect and annotate timely data from users. In other words, we are the platform behind your platform, an integrated part of your application, to ship new & better AI products faster.

Keywords: Human supervision, Platform

## [ecco](https://github.com/jalammar/ecco)

Explain, analyze, and visualize NLP language models. Ecco creates interactive visualizations directly in Jupyter notebooks explaining the behavior of Transformer-based language models (like GPT2, BERT, RoBERTa, T5, and T0).

Keywords: Model explainability

## [s3prl](https://github.com/s3prl/s3prl)

[s3prl](https://github.com/s3prl/s3prl) stands for Self-Supervised Speech Pre-training and Representation Learning. Self-supervised speech pre-trained models are called upstream in this toolkit, and are utilized in various downstream tasks.

Keywords: Speech, Training

## [ru-dalle](https://github.com/ai-forever/ru-dalle)

RuDALL-E aims to be similar to DALL-E, targeted at Russian.

Keywords: DALL-E, Russian

## [DeepKE](https://github.com/zjunlp/DeepKE)

[DeepKE](https://github.com/zjunlp/DeepKE) is a knowledge extraction toolkit for knowledge graph construction, supporting cnSchema, low-resource, document-level and multimodal scenarios for entity, relation and attribute extraction.

Keywords: Knowledge Extraction, Knowledge Graphs

## [Nebuly](https://github.com/nebuly-ai/nebuly)

Nebuly is the next-generation platform to monitor and optimize your AI costs in one place. The platform connects to all your AI cost sources (compute, API providers, AI software licenses, etc.) and centralizes them in one place to give you full visibility on a model basis. The platform also provides optimization recommendations and a co-pilot model that can guide you during the optimization process. The platform builds on top of open-source tools, allowing you to optimize the different steps of your AI stack to squeeze out the best possible cost performance.

Keywords: Optimization, Performance, Monitoring

## [imaginAIry](https://github.com/brycedrennan/imaginAIry)

Offers a CLI and a Python API to generate images with Stable Diffusion. It has support for many tools, like image structure control (controlnet), instruction-based image edits (InstructPix2Pix), prompt-based masking (clipseg), among others.

Keywords: Stable Diffusion, CLI, Python API

## [sparseml](https://github.com/neuralmagic/sparseml)

SparseML is an open-source model optimization toolkit that enables you to create inference-optimized sparse models using pruning, quantization, and distillation algorithms. Models optimized with SparseML can then be exported to ONNX and deployed with DeepSparse for GPU-class performance on CPU hardware.

Keywords: Model optimization, Pruning, Quantization, Distillation

## [opacus](https://github.com/pytorch/opacus)

Opacus is a library that enables training PyTorch models with differential privacy. It supports training with minimal code changes required on the client, has little impact on training performance, and allows the client to track online the privacy budget expended at any given moment.

Keywords: Differential privacy
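
A minimal sketch of making a training loop differentially private (toy data; the noise and clipping hyperparameters are illustrative):

```python
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from opacus import PrivacyEngine

model = nn.Linear(16, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)
data_loader = DataLoader(
    TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,))),
    batch_size=8,
)

# wrap model, optimizer, and loader so each step is differentially private
privacy_engine = PrivacyEngine()
model, optimizer, data_loader = privacy_engine.make_private(
    module=model,
    optimizer=optimizer,
    data_loader=data_loader,
    noise_multiplier=1.1,
    max_grad_norm=1.0,
)
```

After wrapping, the training loop itself is written exactly as it would be without privacy.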
|
||||||
|
|
||||||
|
## [LAVIS](https://github.com/salesforce/LAVIS)
|
||||||
|
|
||||||
|
[LAVIS](https://github.com/salesforce/LAVIS) is a Python deep learning library for LAnguage-and-VISion intelligence research and applications. This library aims to provide engineers and researchers with a one-stop solution to rapidly develop models for their specific multimodal scenarios, and benchmark them across standard and customized datasets. It features a unified interface design to access
|
||||||
|
|
||||||
|
Keywords: Multimodal, NLP, Vision
|
||||||
|
|
||||||
|
## [buzz](https://github.com/chidiwilliams/buzz)
|
||||||
|
|
||||||
|
Buzz transcribes and translates audio offline on your personal computer. Powered by OpenAI's Whisper.
|
||||||
|
|
||||||
|
Keywords: Audio transcription, Translation
|
||||||
|
|
||||||
|
## [rust-bert](https://github.com/guillaume-be/rust-bert)
|
||||||
|
|
||||||
|
Rust-native state-of-the-art Natural Language Processing models and pipelines. Port of Hugging Face's Transformers library, using the tch-rs crate and pre-processing from rust-tokenizers. Supports multi-threaded tokenization and GPU inference. This repository exposes the model base architecture, task-specific heads and ready-to-use pipelines.
|
||||||
|
|
||||||
|
Keywords: Rust, BERT, Inference
|
||||||
|
|
||||||
|
## [EasyNLP](https://github.com/alibaba/EasyNLP)
|
||||||
|
|
||||||
|
[EasyNLP](https://github.com/alibaba/EasyNLP) is an easy-to-use NLP development and application toolkit in PyTorch, first released inside Alibaba in 2021. It is built with scalable distributed training strategies and supports a comprehensive suite of NLP algorithms for various NLP applications. [EasyNLP](https://github.com/alibaba/EasyNLP) integrates knowledge distillation and few-shot learning for landing large pre-trained models, together with various popular multi-modality pre-trained models. It provides a unified framework of model training, inference, and deployment for real-world applications.
|
||||||
|
|
||||||
|
Keywords: NLP, Knowledge distillation, Few-shot learning, Multi-modality, Training, Inference, Deployment
|
||||||
|
|
||||||
|
## [TurboTransformers](https://github.com/Tencent/TurboTransformers)
|
||||||
|
|
||||||
|
A fast and user-friendly runtime for transformer inference (Bert, Albert, GPT2, Decoders, etc) on CPU and GPU.
|
||||||
|
|
||||||
|
Keywords: Optimization, Performance
|
||||||
|
|
||||||
|
## [hivemind](https://github.com/learning-at-home/hivemind)
|
||||||
|
|
||||||
|
Hivemind is a PyTorch library for decentralized deep learning across the Internet. Its intended usage is training one large model on hundreds of computers from different universities, companies, and volunteers.
|
||||||
|
|
||||||
|
Keywords: Decentralized training
|
||||||
|
|
||||||
|
## [docquery](https://github.com/impira/docquery)
|
||||||
|
|
||||||
|
DocQuery is a library and command-line tool that makes it easy to analyze semi-structured and unstructured documents (PDFs, scanned images, etc.) using large language models (LLMs). You simply point DocQuery at one or more documents and specify a question you want to ask. DocQuery is created by the team at Impira.
|
||||||
|
|
||||||
|
Keywords: Semi-structured documents, Unstructured documents, LLM, Document Question Answering
|
||||||
|
|
||||||
|
## [CodeGeeX](https://github.com/THUDM/CodeGeeX)
|
||||||
|
|
||||||
|
[CodeGeeX](https://github.com/THUDM/CodeGeeX) is a large-scale multilingual code generation model with 13 billion parameters, pre-trained on a large code corpus of more than 20 programming languages. It has several unique features:
|
||||||
|
- Multilingual code generation
|
||||||
|
- Crosslingual code translation
|
||||||
|
- Is a customizable programming assistant
|
||||||
|
|
||||||
|
Keywords: Code Generation Model
|
||||||
|
|
||||||
|
## [ktrain](https://github.com/amaiya/ktrain)
|
||||||
|
|
||||||
|
[ktrain](https://github.com/amaiya/ktrain) is a lightweight wrapper for the deep learning library TensorFlow Keras (and other libraries) to help build, train, and deploy neural networks and other machine learning models. Inspired by ML framework extensions like fastai and ludwig, [ktrain](https://github.com/amaiya/ktrain) is designed to make deep learning and AI more accessible and easier to apply for both newcomers and experienced practitioners.
|
||||||
|
|
||||||
|
Keywords: Keras wrapper, Model building, Training, Deployment
|
||||||
|
|
||||||
|
## [FastDeploy](https://github.com/PaddlePaddle/FastDeploy)
|
||||||
|
|
||||||
|
[FastDeploy](https://github.com/PaddlePaddle/FastDeploy) is an Easy-to-use and High Performance AI model deployment toolkit for Cloud, Mobile and Edge with packageout-of-the-box and unified experience, endend-to-end optimization for over fire160+ Text, Vision, Speech and Cross-modal AI models. Including image classification, object detection, OCR, face detection, matting, pp-tracking, NLP, stable diffusion, TTS and other tasks to meet developers' industrial deployment needs for multi-scenario, multi-hardware and multi-platform.
|
||||||
|
|
||||||
|
Keywords: Model deployment, CLoud, Mobile, Edge
|
||||||
|
|
||||||
|
## [underthesea](https://github.com/undertheseanlp/underthesea)
|
||||||
|
|
||||||
|
[underthesea](https://github.com/undertheseanlp/underthesea) is a Vietnamese NLP toolkit. Underthesea is a suite of open source Python modules data sets and tutorials supporting research and development in Vietnamese Natural Language Processing. We provides extremely easy API to quickly apply pretrained NLP models to your Vietnamese text, such as word segmentation, part-of-speech tagging (PoS), named entity recognition (NER), text classification and dependency parsing.
|
||||||
|
|
||||||
|
Keywords: Vietnamese, NLP
|
||||||
|
|
||||||
|
## [hasktorch](https://github.com/hasktorch/hasktorch)
|
||||||
|
|
||||||
|
Hasktorch is a library for tensors and neural networks in Haskell. It is an independent open source community project which leverages the core C++ libraries shared by PyTorch.
|
||||||
|
|
||||||
|
Keywords: Haskell, Neural Networks
## [donut](https://github.com/clovaai/donut)

Donut, or Document understanding transformer, is a new method of document understanding that utilizes an OCR-free end-to-end Transformer model.

Donut does not require off-the-shelf OCR engines/APIs, yet it shows state-of-the-art performance on various visual document understanding tasks, such as visual document classification or information extraction (a.k.a. document parsing).

Keywords: Document Understanding

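Donut is also integrated into 🤗 Transformers; a minimal document-VQA sketch using the publicly released DocVQA checkpoint (the image path and question are placeholders):

```python
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = Image.open("receipt.png").convert("RGB")  # placeholder input image
pixel_values = processor(image, return_tensors="pt").pixel_values

# Donut is prompted with task-specific XML-like tokens instead of running OCR
prompt = "<s_docvqa><s_question>What is the total amount?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids

outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
print(processor.batch_decode(outputs)[0])
```
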
## [transformers-interpret](https://github.com/cdpierse/transformers-interpret)

Transformers Interpret is a model explainability tool designed to work exclusively with the transformers package.

In line with the philosophy of the Transformers package, Transformers Interpret allows any transformers model to be explained in just two lines. Explainers are available for both text and computer vision models. Visualizations are also available in notebooks and as savable PNG and HTML files.

Keywords: Model interpretation, Visualization

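The advertised two-line usage, sketched here with a public sentiment checkpoint (explainer class name as per the project's README):

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers_interpret import SequenceClassificationExplainer

name = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(name)
tokenizer = AutoTokenizer.from_pretrained(name)

# The two lines: build an explainer, then attribute a prediction to input words
explainer = SequenceClassificationExplainer(model, tokenizer)
print(explainer("I love you, I like you"))  # per-word attribution scores
```
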
## [mlrun](https://github.com/mlrun/mlrun)

MLRun is an open MLOps platform for quickly building and managing continuous ML applications across their lifecycle. MLRun integrates into your development and CI/CD environment and automates the delivery of production data, ML pipelines, and online applications, significantly reducing engineering efforts, time to production, and computation resources. With MLRun, you can choose any IDE on your local machine or on the cloud. MLRun breaks the silos between data, ML, software, and DevOps/MLOps teams, enabling collaboration and fast continuous improvements.

Keywords: MLOps

## [FederatedScope](https://github.com/alibaba/FederatedScope)

[FederatedScope](https://github.com/alibaba/FederatedScope) is a comprehensive federated learning platform that provides convenient usage and flexible customization for various federated learning tasks in both academia and industry. Based on an event-driven architecture, [FederatedScope](https://github.com/alibaba/FederatedScope) integrates rich collections of functionalities to satisfy the burgeoning demands from federated learning, and aims to build up an easy-to-use platform for promoting learning safely and effectively.

Keywords: Federated learning, Event-driven

## [pythainlp](https://github.com/PyThaiNLP/pythainlp)

PyThaiNLP is a Python package for text processing and linguistic analysis, similar to NLTK, with a focus on the Thai language.

Keywords: Thai, NLP, NLTK

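A minimal sketch of Thai word segmentation (the sample string is illustrative; `word_tokenize` and its default `newmm` engine follow PyThaiNLP's documentation):

```python
from pythainlp import word_tokenize

# Segment a Thai sentence into words (Thai is written without spaces)
print(word_tokenize("ผมรักภาษาไทย", engine="newmm"))
```
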
## [FlagAI](https://github.com/FlagAI-Open/FlagAI)

[FlagAI](https://github.com/FlagAI-Open/FlagAI) (Fast LArge-scale General AI models) is a fast, easy-to-use and extensible toolkit for large-scale models. Its goal is to support training, fine-tuning, and deployment of large-scale models on various downstream tasks with multi-modality.

Keywords: Large models, Training, Fine-tuning, Deployment, Multi-modal

## [pyserini](https://github.com/castorini/pyserini)

[pyserini](https://github.com/castorini/pyserini) is a Python toolkit for reproducible information retrieval research with sparse and dense representations. Retrieval using sparse representations is provided via integration with the group's Anserini IR toolkit. Retrieval using dense representations is provided via integration with Facebook's Faiss library.

Keywords: IR, Information Retrieval, Dense, Sparse

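A sparse (BM25) retrieval sketch; the prebuilt index name is an assumption based on Pyserini's documentation:

```python
from pyserini.search.lucene import LuceneSearcher

# Download a prebuilt BM25 index and query it
searcher = LuceneSearcher.from_prebuilt_index("msmarco-v1-passage")
hits = searcher.search("what is a lobster roll?")
for hit in hits[:3]:
    print(hit.docid, round(hit.score, 3))
```
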
## [baal](https://github.com/baal-org/baal)

[baal](https://github.com/baal-org/baal) is an active learning library that supports both industrial applications and research use cases. [baal](https://github.com/baal-org/baal) currently supports Monte-Carlo Dropout, MCDropConnect, deep ensembles, and semi-supervised learning.

Keywords: Active Learning, Research, Labeling

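A rough sketch of Monte-Carlo Dropout with baal, assuming its `patch_module` helper from `baal.bayesian.dropout` (treat the exact import path as an assumption for your installed version): dropout is kept active at inference time, so repeated forward passes give an uncertainty estimate.

```python
import torch
from torch import nn
from baal.bayesian.dropout import patch_module  # assumption: baal's MC Dropout helper

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Dropout(0.5), nn.Linear(32, 2))
mc_model = patch_module(model)  # dropout now stays active even in eval mode

mc_model.eval()
x = torch.randn(1, 16)
samples = torch.stack([mc_model(x) for _ in range(20)])  # 20 stochastic passes
print(samples.mean(0), samples.std(0))                   # prediction and uncertainty
```
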
## [cleanlab](https://github.com/cleanlab/cleanlab)

[cleanlab](https://github.com/cleanlab/cleanlab) is the standard data-centric AI package for data quality and machine learning with messy, real-world data and labels. For text, image, tabular, audio (among others) datasets, you can use cleanlab to automatically: detect data issues (outliers, label errors, near duplicates, etc.), train robust ML models, infer consensus and annotator quality for multi-annotator data, and suggest data to (re)label next (active learning).

Keywords: Data-Centric AI, Data Quality, Noisy Labels, Outlier Detection, Active Learning

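For example, label issues can be flagged from any model's out-of-sample predicted probabilities with cleanlab's documented `find_label_issues` (the toy arrays below are illustrative):

```python
import numpy as np
from cleanlab.filter import find_label_issues

labels = np.array([0, 1, 1, 0])          # given (possibly noisy) labels
pred_probs = np.array([[0.90, 0.10],     # out-of-sample predicted probabilities
                       [0.20, 0.80],
                       [0.95, 0.05],     # labeled 1 but predicted 0: likely mislabeled
                       [0.80, 0.20]])

issues = find_label_issues(labels=labels, pred_probs=pred_probs,
                           return_indices_ranked_by="self_confidence")
print(issues)  # indices of suspected label errors
```
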
## [BentoML](https://github.com/bentoml/BentoML)

[BentoML](https://github.com/bentoml) is the unified framework for building, shipping, and scaling production-ready AI applications incorporating traditional ML, pre-trained AI models, and Generative and Large Language Models.

All Hugging Face models and pipelines can be seamlessly integrated into BentoML applications, enabling the running of models on the most suitable hardware and independent scaling based on usage.

Keywords: BentoML, Framework, Deployment, AI Applications

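A sketch of registering a Transformers pipeline in BentoML's local model store (API names follow BentoML 1.x documentation; treat them as assumptions for your installed version):

```python
import bentoml
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
bentoml.transformers.save_model("summarizer", summarizer)  # registers the pipeline for serving
```
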
conftest.py
@@ -20,6 +20,10 @@ import sys
 import warnings
 from os.path import abspath, dirname, join
 
+import _pytest
+
+from transformers.testing_utils import HfDoctestModule, HfDocTestParser
+
 # allow having multiple repository checkouts and not needing to remember to rerun
 # 'pip install -e .[dev]' when switching between checkouts and running tests.
@@ -38,7 +42,10 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
     )
+    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
     config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
+    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
+    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
 
 
 def pytest_addoption(parser):
@@ -62,7 +69,7 @@ def pytest_sessionfinish(session, exitstatus):
 
 
 # Doctest custom flag to ignore output.
-IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
+IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
 
 OutputChecker = doctest.OutputChecker
@@ -75,3 +82,5 @@ class CustomOutputChecker(OutputChecker):
 
 
 doctest.OutputChecker = CustomOutputChecker
+_pytest.doctest.DoctestModule = HfDoctestModule
+doctest.DocTestParser = HfDocTestParser

@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
@@ -9,11 +9,11 @@ SHELL ["sh", "-lc"]
 # The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
 # to be used as arguments for docker build (so far).
 
-ARG PYTORCH='1.12.1'
+ARG PYTORCH='2.0.1'
 # (not always a valid torch version)
 ARG INTEL_TORCH_EXT='1.11.0'
 # Example: `cu102`, `cu113`, etc.
-ARG CUDA='cu113'
+ARG CUDA='cu118'
 
 RUN apt update
 RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs
@@ -32,16 +32,12 @@ RUN echo torch=$VERSION
 # TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI).
 RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA
 
-RUN python3 -m pip install --no-cache-dir -U tensorflow
-RUN python3 -m pip install --no-cache-dir -U tensorflow_probability
+RUN python3 -m pip install --no-cache-dir -U tensorflow==2.12 protobuf==3.20.3 tensorflow_text tensorflow_probability
 RUN python3 -m pip uninstall -y flax jax
 
-# Use installed torch version for `torch-scatter` to avid to deal with PYTORCH='pre'.
-# If torch is nightly version, the link is likely to be invalid, but the installation falls back to the latest torch-scatter
-RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+$CUDA.html
-RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://software.intel.com/ipex-whl-stable
+RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://developer.intel.com/ipex-whl-stable-cpu
 
-RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip
+RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
 RUN python3 -m pip install -U "itsdangerous<2.1.0"
 
 RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
@@ -49,7 +45,14 @@ RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/acc
 # Add bitsandbytes for mixed int8 testing
 RUN python3 -m pip install --no-cache-dir bitsandbytes
 
-RUN python3 -m pip install --no-cache-dir decord
+# For bettertransformer
+RUN python3 -m pip install --no-cache-dir optimum
+
+# For video model testing
+RUN python3 -m pip install --no-cache-dir decord av==9.2.0
+
+# For `dinat` model
+RUN python3 -m pip install --no-cache-dir natten -f https://shi-labs.com/natten/wheels/$CUDA/
 
 # When installing in editable mode, `transformers` is not recognized as a package.
 # this line must be added in order for python to be aware of transformers.

@@ -10,8 +10,7 @@ RUN apt-get -y update && apt-get install -y libsndfile1-dev && apt install -y te
 # Torch needs to be installed before deepspeed
 RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed]
 
-RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python -c "from torch import version; print(version.__version__.split('+')[0])")+cpu.html
-RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip
+RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git pytesseract
 RUN python3 -m pip install --no-cache-dir pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com
 RUN python3 -m pip install -U "itsdangerous<2.1.0"

@@ -1,4 +1,4 @@
-ARG BASE_DOCKER_IMAGE="nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04"
+ARG BASE_DOCKER_IMAGE
 FROM $BASE_DOCKER_IMAGE
 LABEL maintainer="Hugging Face"
 
@@ -8,7 +8,7 @@ ARG DEBIAN_FRONTEND=noninteractive
 SHELL ["sh", "-lc"]
 
 RUN apt update
-RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs
+RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs libaio-dev
 RUN git lfs install
 RUN python3 -m pip install --no-cache-dir --upgrade pip
 
@@ -23,9 +23,11 @@ RUN cd transformers && python3 setup.py develop
 ARG FRAMEWORK
 ARG VERSION
 
+# Control `setuptools` version to avoid some issues
+RUN [ "$VERSION" != "1.10" ] && python3 -m pip install -U setuptools || python3 -m pip install -U "setuptools<=59.5"
+
 # Remove all frameworks
-# (`accelerate` requires `torch`, and this causes import issues for TF-only testing)
-RUN python3 -m pip uninstall -y torch torchvision torchaudio accelerate tensorflow jax flax
+RUN python3 -m pip uninstall -y torch torchvision torchaudio tensorflow jax flax
 
 # Get the libraries and their versions to install, and write installation command to `~/.profile`.
 RUN python3 ./transformers/utils/past_ci_versions.py --framework $FRAMEWORK --version $VERSION
@@ -34,10 +36,24 @@ RUN python3 ./transformers/utils/past_ci_versions.py --framework $FRAMEWORK --ve
 RUN echo "INSTALL_CMD = $INSTALL_CMD"
 RUN $INSTALL_CMD
 
-# Having installation problems for torch-scatter with torch <= 1.6. Disable so we have the same set of tests.
-# (This part will be removed once the logic of using `past_ci_versions.py` is used in other Dockerfile files.)
-# # Use installed torch version for `torch-scatter`.
-# # (The env. variable $CUDA is defined in `past_ci_versions.py`)
-# RUN [ "$FRAMEWORK" = "pytorch" ] && python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+$CUDA.html || echo "torch-scatter not to be installed"
+RUN [ "$FRAMEWORK" != "pytorch" ] && echo "`deepspeed-testing` installation is skipped" || python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
+
+# Remove `accelerate`: it requires `torch`, and this causes import issues for TF-only testing
+# We will install `accelerate@main` in Past CI workflow file
+RUN python3 -m pip uninstall -y accelerate
+
+# Uninstall `torch-tensorrt` and `apex` shipped with the base image
+RUN python3 -m pip uninstall -y torch-tensorrt apex
+
+# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
+RUN python3 -m pip uninstall -y deepspeed
+# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
+# Issue: https://github.com/microsoft/DeepSpeed/issues/2010
+# RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
+#    DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
 
 RUN python3 -m pip install -U "itsdangerous<2.1.0"
+
+# When installing in editable mode, `transformers` is not recognized as a package.
+# this line must be added in order for python to be aware of transformers.
+RUN cd transformers && python3 setup.py develop

@@ -1,11 +1,12 @@
-FROM nvcr.io/nvidia/pytorch:21.03-py3
+# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-22-12.html#rel-22-12
+FROM nvcr.io/nvidia/pytorch:22.12-py3
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
 
-ARG PYTORCH='1.12.1'
+ARG PYTORCH='2.0.1'
 # Example: `cu102`, `cu113`, etc.
-ARG CUDA='cu113'
+ARG CUDA='cu118'
 
 RUN apt -y update
 RUN apt install -y libaio-dev
@@ -14,6 +15,8 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip
 ARG REF=main
 RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
 
+RUN python3 -m pip uninstall -y torch torchvision torchaudio
+
 # Install latest release PyTorch
 # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
 # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
@@ -21,15 +24,31 @@ RUN python3 -m pip install --no-cache-dir -U torch==$PYTORCH torchvision torchau
 
 RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
 
+RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
+
+# Uninstall `transformer-engine` shipped with the base image
+RUN python3 -m pip uninstall -y transformer-engine
+
+# Uninstall `torch-tensorrt` shipped with the base image
+RUN python3 -m pip uninstall -y torch-tensorrt
+
+# recompile apex
+RUN python3 -m pip uninstall -y apex
+RUN git clone https://github.com/NVIDIA/apex
+# `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
+RUN cd apex && git checkout 82ee367f3da74b4cd62a1fb47aa9806f0f47b58b && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check .
+
 # Pre-build **latest** DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
 RUN python3 -m pip uninstall -y deepspeed
 # This has to be run (again) inside the GPU VMs running the tests.
 # The installation works here, but some tests fail, if we don't pre-build deepspeed again in the VMs running the tests.
 # TODO: Find out why test fail.
-RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
+RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
 
 # When installing in editable mode, `transformers` is not recognized as a package.
 # this line must be added in order for python to be aware of transformers.
 RUN cd transformers && python3 setup.py develop
 
+# The base image ships with `pydantic==1.8.2` which is not working - i.e. the next command fails
+RUN python3 -m pip install -U --no-cache-dir "pydantic<2"
 RUN python3 -c "from deepspeed.launcher.runner import main"

@@ -1,10 +1,11 @@
-FROM nvcr.io/nvidia/pytorch:21.03-py3
+# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-22-12.html#rel-22-12
+FROM nvcr.io/nvidia/pytorch:22.12-py3
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
 
 # Example: `cu102`, `cu113`, etc.
-ARG CUDA='cu113'
+ARG CUDA='cu118'
 
 RUN apt -y update
 RUN apt install -y libaio-dev
@@ -13,6 +14,8 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip
 ARG REF=main
 RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
 
+RUN python3 -m pip uninstall -y torch torchvision torchaudio
+
 # Install **nightly** release PyTorch (flag `--pre`)
 # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
 # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
@@ -20,30 +23,38 @@ RUN python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio
 
 RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
 
+RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
+
+# Uninstall `transformer-engine` shipped with the base image
+RUN python3 -m pip uninstall -y transformer-engine
+
+# Uninstall `torch-tensorrt` and `apex` shipped with the base image
+RUN python3 -m pip uninstall -y torch-tensorrt apex
+
 # Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
 RUN python3 -m pip uninstall -y deepspeed
 # This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
 # Issue: https://github.com/microsoft/DeepSpeed/issues/2010
 # RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
-#    DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
+#    DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
 
-# For `torchdynamo` tests
-# (see https://github.com/huggingface/transformers/pull/17765)
-RUN git clone https://github.com/pytorch/functorch
-RUN python3 -m pip install --no-cache-dir ./functorch[aot]
-RUN cd functorch && python3 setup.py develop
-
-RUN git clone https://github.com/pytorch/torchdynamo
-RUN python3 -m pip install -r ./torchdynamo/requirements.txt
-RUN cd torchdynamo && python3 setup.py develop
-
-# install TensorRT
-RUN python3 -m pip install --no-cache-dir -U nvidia-pyindex
-RUN python3 -m pip install --no-cache-dir -U nvidia-tensorrt==8.2.4.2
-
-# install torch_tensorrt (fx path)
-RUN git clone https://github.com/pytorch/TensorRT.git
-RUN cd TensorRT/py && python3 setup.py install --fx-only
+## For `torchdynamo` tests
+## (see https://github.com/huggingface/transformers/pull/17765)
+#RUN git clone https://github.com/pytorch/functorch
+#RUN python3 -m pip install --no-cache-dir ./functorch[aot]
+#RUN cd functorch && python3 setup.py develop
+#
+#RUN git clone https://github.com/pytorch/torchdynamo
+#RUN python3 -m pip install -r ./torchdynamo/requirements.txt
+#RUN cd torchdynamo && python3 setup.py develop
+#
+## install TensorRT
+#RUN python3 -m pip install --no-cache-dir -U nvidia-pyindex
+#RUN python3 -m pip install --no-cache-dir -U nvidia-tensorrt==8.2.4.2
+#
+## install torch_tensorrt (fx path)
+#RUN git clone https://github.com/pytorch/TensorRT.git
+#RUN cd TensorRT/py && python3 setup.py install --fx-only
 
 # When installing in editable mode, `transformers` is not recognized as a package.
 # this line must be added in order for python to be aware of transformers.

@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
@@ -9,21 +9,22 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip
 
 ARG REF=main
 RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
-RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing]
+RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video]
 
 # If set to nothing, will install the latest version
-ARG PYTORCH='1.12.1'
+ARG PYTORCH='2.0.1'
 ARG TORCH_VISION=''
 ARG TORCH_AUDIO=''
+# Example: `cu102`, `cu113`, etc.
+ARG CUDA='cu118'
 
-RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/cu113
-RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='TORCH_VISION'.*' || VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/cu113
-RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='TORCH_AUDIO'.*' || VERSION='torchaudio'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/cu113
+RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
+RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='TORCH_VISION'.*' || VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
+RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='TORCH_AUDIO'.*' || VERSION='torchaudio'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
 
 RUN python3 -m pip uninstall -y tensorflow flax
 
-RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+cu113.html
-RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip
+RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
 RUN python3 -m pip install -U "itsdangerous<2.1.0"
 
 # When installing in editable mode, `transformers` is not recognized as a package.

@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
@@ -12,12 +12,14 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers &&
 RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing]
 
 # If set to nothing, will install the latest version
-ARG TENSORFLOW=''
+ARG TENSORFLOW='2.12'
 
 RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION
 RUN python3 -m pip uninstall -y torch flax
 RUN python3 -m pip install -U "itsdangerous<2.1.0"
 
+RUN python3 -m pip install --no-cache-dir -U tensorflow_probability
+
 # When installing in editable mode, `transformers` is not recognized as a package.
 # this line must be added in order for python to be aware of transformers.
 RUN cd transformers && python3 setup.py develop

@@ -81,7 +81,7 @@ The `preview` command only works with existing doc files. When you add a complet
 
 ## Adding a new element to the navigation bar
 
-Accepted files are Markdown (.md or .mdx).
+Accepted files are Markdown (.md or .md).
 
 Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
 the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/transformers/blob/main/docs/source/_toctree.yml) file.
@@ -90,7 +90,7 @@ the filename without the extension in the [`_toctree.yml`](https://github.com/hu
 
 It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information.
 
-Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
+Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
 
 So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
 
@@ -109,7 +109,7 @@ Sections that were moved:
 
 Use the relative style to link to the new file so that the versioned docs continue to work.
 
-For an example of a rich moved section set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.mdx).
+For an example of a rich moved section set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
 
 
 ## Writing Documentation - Specification
@@ -138,7 +138,7 @@ When translating, refer to the guide at [./TRANSLATING.md](https://github.com/hu
 
 When adding a new model:
 
-- Create a file `xxx.mdx` or under `./source/model_doc` (don't hesitate to copy an existing file as template).
+- Create a file `xxx.md` or under `./source/model_doc` (don't hesitate to copy an existing file as template).
 - Link that file in `./source/_toctree.yml`.
 - Write a short overview of the model:
   - Overview with paper & authors
@@ -354,7 +354,7 @@ The docstring should give a minimal, clear example of how the respective model
 is to be used in inference and also include the expected (ideally sensible)
 output.
 Often, readers will try out the example before even going through the function
-or class definitions. Therefore it is of utmost importance that the example
+or class definitions. Therefore, it is of utmost importance that the example
 works as expected.
 
 ## Docstring testing
@@ -369,20 +369,7 @@ contains the example docstring to the [documentation_tests.txt](../utils/documen
 
 ### For Python files
 
-You will first need to run the following command (from the root of the repository) to prepare the doc file (doc-testing needs to add additional lines that we don't include in the doc source files):
-
-```bash
-python utils/prepare_for_doc_test.py src docs
-```
-
-If you work on a specific python module, say `modeling_wav2vec2.py`, you can run the command as follows (to avoid the unnecessary temporary changes in irrelevant files):
-
-```bash
-python utils/prepare_for_doc_test.py src/transformers/utils/doc.py src/transformers/models/wav2vec2/modeling_wav2vec2.py
-```
-(`utils/doc.py` should always be included)
-
-Then you can run all the tests in the docstrings of a given file with the following command, here is how we test the modeling file of Wav2Vec2 for instance:
+Run all the tests in the docstrings of a given file with the following command, here is how we test the modeling file of Wav2Vec2 for instance:
 
 ```bash
 pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py -sv --doctest-continue-on-failure
@@ -394,30 +381,12 @@ If you want to isolate a specific docstring, just add `::` after the file name t
 pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py::transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward -sv --doctest-continue-on-failure
 ```
 
-Once you're done, you can run the following command (still from the root of the repository) to undo the changes made by the first command before committing:
-
-```bash
-python utils/prepare_for_doc_test.py src docs --remove_new_line
-```
-
 ### For Markdown files
 
-You will first need to run the following command (from the root of the repository) to prepare the doc file (doc-testing needs to add additional lines that we don't include in the doc source files):
+You can test locally a given file with this command (here testing the quicktour):
 
 ```bash
-python utils/prepare_for_doc_test.py src docs
+pytest --doctest-modules docs/source/quicktour.md -sv --doctest-continue-on-failure --doctest-glob="*.md"
 ```
-
-Then you can test locally a given file with this command (here testing the quicktour):
-
-```bash
-pytest --doctest-modules docs/source/quicktour.mdx -sv --doctest-continue-on-failure --doctest-glob="*.mdx"
-```
-
-Once you're done, you can run the following command (still from the root of the repository) to undo the changes made by the first command before committing:
-
-```bash
-python utils/prepare_for_doc_test.py src docs --remove_new_line
-```
 
 ### Writing doctests

@@ -1,7 +1,7 @@
 # docstyle-ignore
 INSTALL_CONTENT = """
 # Transformers installation
-! pip install transformers datasets
+! pip install transformers datasets evaluate
 # To install from source instead of the last release, comment the command above and uncomment the following one.
 # ! pip install git+https://github.com/huggingface/transformers.git
 """

@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Verteiltes Training mit 🤗 Accelerate

@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Vortrainierte Instanzen mit einer AutoClass laden

@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # 🤗 Transformers
@@ -28,8 +32,8 @@ Jede 🤗 Transformers-Architektur ist in einem eigenständigen Python-Modul def
 ## Wenn Sie auf der Suche nach individueller Unterstützung durch das Hugging Face-Team sind
 
 <a target="_blank" href="https://huggingface.co/support">
-    <img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
-</a><br>
+    <img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="width: 100%; max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
+</a>
 
 ## Inhalt
 
@@ -52,6 +56,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
 <!--This list is updated automatically from the README with _make fix-copies_. Do not update manually! -->
 
 1. **[ALBERT](model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
+1. **[ALIGN](model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
 1. **[BART](model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
 1. **[BARThez](model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
 1. **[BARTpho](model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
@@ -63,7 +68,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
 1. **[BigBird-RoBERTa](model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
 1. **[Blenderbot](model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
 1. **[BlenderbotSmall](model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
-1. **[BLOOM](model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/).
+1. **[BLOOM](model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
 1. **[BORT](model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
 1. **[ByT5](model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
 1. **[CamemBERT](model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
@@ -72,6 +77,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
 1. **[CodeGen](model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
 1. **[ConvBERT](model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
 1. **[ConvNeXT](model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
+1. **[ConvNeXTV2](model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
 1. **[CPM](model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
 1. **[CTRL](model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
 1. **[CvT](model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
@@ -86,6 +92,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
+1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le.
 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||||
1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||||
@ -98,6 +105,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
|
|||||||
1. **[GPT NeoX](model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
|
1. **[GPT NeoX](model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
|
||||||
1. **[GPT-2](model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
1. **[GPT-2](model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||||
1. **[GPT-J](model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
1. **[GPT-J](model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||||
|
1. **[GPTSAN-japanese](model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto(tanreinama).
|
||||||
1. **[GroupViT](model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
|
1. **[GroupViT](model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
|
||||||
1. **[Hubert](model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
1. **[Hubert](model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
||||||
1. **[I-BERT](model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
|
1. **[I-BERT](model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
|
||||||
@ -115,6 +123,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
|
|||||||
1. **[M-CTC-T](model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
|
1. **[M-CTC-T](model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
|
||||||
1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||||
1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||||
|
1. **[Mask2Former](model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
|
||||||
1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
|
1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
|
||||||
1. **[mBART](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
1. **[mBART](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||||
1. **[mBART-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
1. **[mBART-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||||
@ -129,6 +138,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
|
|||||||
1. **[Nezha](model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
|
1. **[Nezha](model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
|
||||||
1. **[NLLB](model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
|
1. **[NLLB](model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
|
||||||
1. **[Nyströmformer](model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
1. **[Nyströmformer](model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
||||||
|
1. **[OneFormer](model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
|
||||||
1. **[OPT](master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
|
1. **[OPT](master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
|
||||||
1. **[OWL-ViT](model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
|
1. **[OWL-ViT](model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
|
||||||
1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
|
1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
|
||||||
@ -163,6 +173,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
|
|||||||
1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||||
1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||||
1. **[UL2](model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
|
1. **[UL2](model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
|
||||||
|
1. **[UMT5](model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
|
||||||
1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||||
1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||||
1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||||
@ -180,6 +191,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
|
|||||||
1. **[XLM-ProphetNet](model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
1. **[XLM-ProphetNet](model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||||
1. **[XLM-RoBERTa](model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
1. **[XLM-RoBERTa](model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||||
1. **[XLM-RoBERTa-XL](model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
1. **[XLM-RoBERTa-XL](model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
||||||
|
1. **[XLM-V](model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
|
||||||
1. **[XLNet](model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
1. **[XLNet](model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||||
1. **[XLS-R](model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
1. **[XLS-R](model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
||||||
1. **[XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
1. **[XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||||
@@ -276,9 +288,9 @@ Flax), PyTorch, and/or TensorFlow.
 | RAG | ✅ | ❌ | ✅ | ✅ | ❌ |
 | REALM | ✅ | ✅ | ✅ | ❌ | ❌ |
 | Reformer | ✅ | ✅ | ✅ | ❌ | ❌ |
-| RegNet | ❌ | ❌ | ✅ | ✅ | ❌ |
+| RegNet | ❌ | ❌ | ✅ | ✅ | ✅ |
 | RemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
-| ResNet | ❌ | ❌ | ✅ | ✅ | ❌ |
+| ResNet | ❌ | ❌ | ✅ | ✅ | ✅ |
 | RetriBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
 | RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
 | RoFormer | ✅ | ✅ | ✅ | ✅ | ✅ |
@@ -12,6 +12,10 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->

 # Installation
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->

 # Share a model
@@ -146,7 +150,7 @@ Push a model to the Hub with [`PushToHubCallback`]. In the [`PushT
 - The `hub_model_id`, which is your Hub username and model name.

 ```py
->>> from transformers.keras.callbacks import PushToHubCallback
+>>> from transformers import PushToHubCallback

 >>> push_to_hub_callback = PushToHubCallback(
 ...     output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model"
 ... )
 ```
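As a minimal usage sketch (outside the diff), assuming `model` and `tf_train_dataset` were created in earlier steps of this guide, the callback is simply handed to Keras `fit`, which uploads the model to the Hub during training:

```py
>>> # Hedged sketch: `model` and `tf_train_dataset` are assumed from earlier steps.
>>> model.fit(tf_train_dataset, epochs=3, callbacks=[push_to_hub_callback])  # doctest: +SKIP
```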
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->

 # Pipelines for inference
@@ -56,7 +60,7 @@ If you have more than one input, pass your input as a list:
 ... ) # doctest: +SKIP
 ```

-Any additional parameters for your task can also be included in the [`pipeline`]. The `text-generation` task has a [`~generation_utils.GenerationMixin.generate`] method with several parameters for controlling the output. For example, if you want to generate more than one output, set the `num_return_sequences` parameter:
+Any additional parameters for your task can also be included in the [`pipeline`]. The `text-generation` task has a [`~generation.GenerationMixin.generate`] method with several parameters for controlling the output. For example, if you want to generate more than one output, set the `num_return_sequences` parameter:

 ```py
 >>> generator(
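...     # editorial completion, not part of the diff: the call is cut off at the hunk
...     # boundary; a placeholder prompt plus the parameter described above might look so
...     "An example prompt",
...     num_return_sequences=2,
... )  # doctest: +SKIP
```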
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->

 # Preprocess
@@ -350,12 +354,12 @@ Next, take a look at the image with the 🤗 Datasets [Image] feature (h

 ### Feature extractor

-Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]:
+Load the feature extractor with [`AutoImageProcessor.from_pretrained`]:

 ```py
->>> from transformers import AutoFeatureExtractor
+>>> from transformers import AutoImageProcessor

->>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
+>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
 ```
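As a hedged usage sketch (outside the diff), the loaded processor is called like a tokenizer and returns framework tensors; `image` here stands in for a PIL image loaded earlier in the guide:

```py
>>> # Hypothetical usage: `image` is assumed to be a PIL image from the dataset.
>>> inputs = image_processor(image, return_tensors="pt")
>>> list(inputs.keys())  # doctest: +SKIP
['pixel_values']
```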

 ### Data augmentation

@@ -367,9 +371,9 @@ For computer vision tasks, it is common to add some augmentation to the images as part of the preproc
 ```py
 >>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor

->>> normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
+>>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
 >>> _transforms = Compose(
-...     [RandomResizedCrop(feature_extractor.size), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize]
+...     [RandomResizedCrop(image_processor.size["height"]), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize]
 ... )
 ```

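To show where `_transforms` plugs in, a minimal sketch (outside the diff) under the assumption that `dataset` is a 🤗 Datasets split with an `image` column, as in this guide:

```py
>>> def transforms(examples):
...     # run the torchvision pipeline on the fly and store the result as model input
...     examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]]
...     return examples

>>> dataset.set_transform(transforms)  # doctest: +SKIP
```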
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->

 # Quickstart
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->

 # Fine-tune a pretrained model
@@ -185,6 +189,8 @@ from transformers import AutoTokenizer

 tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
 tokenized_data = tokenizer(dataset["text"], return_tensors="np", padding=True)
+# Tokenizer returns a BatchEncoding, but we convert that to a dict for Keras
+tokenized_data = dict(tokenized_data)

 labels = np.array(dataset["label"]) # Label is already an array of 0 and 1
 ```
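To see where this dict ends up, a minimal continuation sketch (outside the diff), assuming the goal of this section is Keras fine-tuning as in the surrounding guide; the tokenized dict and label array can be passed straight to `fit`:

```py
import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification

# Hedged sketch: a TF classification head on the same checkpoint as above;
# transformers models pick a sensible default loss when none is given to compile().
model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
model.compile(optimizer=tf.keras.optimizers.Adam(3e-5))
model.fit(tokenized_data, labels)
```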
@@ -8,43 +8,24 @@
 title: Get started
 - sections:
 - local: pipeline_tutorial
-title: Pipelines for inference
+title: Run inference with pipelines
 - local: autoclass_tutorial
-title: Load pretrained instances with an AutoClass
+title: Write portable code with AutoClass
 - local: preprocessing
-title: Preprocess
+title: Preprocess data
 - local: training
 title: Fine-tune a pretrained model
+- local: run_scripts
+title: Train with a script
 - local: accelerate
-title: Distributed training with 🤗 Accelerate
+title: Set up distributed training with 🤗 Accelerate
 - local: model_sharing
-title: Share a model
+title: Share your model
+- local: transformers_agents
+title: Agents
 title: Tutorials
 - sections:
 - sections:
-- local: create_a_model
-title: Create a custom architecture
-- local: custom_models
-title: Sharing custom models
-- local: run_scripts
-title: Train with a script
-- local: sagemaker
-title: Run training on Amazon SageMaker
-- local: converting_tensorflow_models
-title: Converting from TensorFlow checkpoints
-- local: serialization
-title: Export to ONNX
-- local: torchscript
-title: Export to TorchScript
-- local: troubleshooting
-title: Troubleshoot
-title: General usage
-- sections:
-- local: fast_tokenizers
-title: Use tokenizers from 🤗 Tokenizers
-- local: multilingual
-title: Inference for multilingual models
-- sections:
 - local: tasks/sequence_classification
 title: Text classification
 - local: tasks/token_classification
@@ -52,29 +33,82 @@
 - local: tasks/question_answering
 title: Question answering
 - local: tasks/language_modeling
-title: Language modeling
+title: Causal language modeling
+- local: tasks/masked_language_modeling
+title: Masked language modeling
 - local: tasks/translation
 title: Translation
 - local: tasks/summarization
 title: Summarization
 - local: tasks/multiple_choice
 title: Multiple choice
-title: Task guides
-isExpanded: false
 title: Natural Language Processing
+isExpanded: false
 - sections:
 - local: tasks/audio_classification
 title: Audio classification
 - local: tasks/asr
 title: Automatic speech recognition
 title: Audio
+isExpanded: false
 - sections:
 - local: tasks/image_classification
 title: Image classification
 - local: tasks/semantic_segmentation
 title: Semantic segmentation
+- local: tasks/video_classification
+title: Video classification
+- local: tasks/object_detection
+title: Object detection
+- local: tasks/zero_shot_object_detection
+title: Zero-shot object detection
+- local: tasks/zero_shot_image_classification
+title: Zero-shot image classification
+- local: tasks/monocular_depth_estimation
+title: Depth estimation
 title: Computer Vision
+isExpanded: false
 - sections:
+- local: tasks/image_captioning
+title: Image captioning
+- local: tasks/document_question_answering
+title: Document Question Answering
+- local: tasks/text-to-speech
+title: Text to speech
+title: Multimodal
+isExpanded: false
+title: Task Guides
+- sections:
+- local: fast_tokenizers
+title: Use fast tokenizers from 🤗 Tokenizers
+- local: multilingual
+title: Run inference with multilingual models
+- local: generation_strategies
+title: Customize text generation strategy
+- local: create_a_model
+title: Use model-specific APIs
+- local: custom_models
+title: Share a custom model
+- local: sagemaker
+title: Run training on Amazon SageMaker
+- local: serialization
+title: Export to ONNX
+- local: tflite
+title: Export to TFLite
+- local: torchscript
+title: Export to TorchScript
+- local: benchmarks
+title: Benchmarks
+- local: notebooks
+title: Notebooks with examples
+- local: community
+title: Community resources
+- local: custom_tools
+title: Custom Tools and Prompts
+- local: troubleshooting
+title: Troubleshoot
+title: Developer guides
+- sections:
 - local: performance
 title: Overview
 - local: perf_train_gpu_one
@@ -87,6 +121,8 @@
 title: Training on many CPUs
 - local: perf_train_tpu
 title: Training on TPUs
+- local: perf_train_tpu_tf
+title: Training on TPU with TensorFlow
 - local: perf_train_special
 title: Training on Specialized Hardware
 - local: perf_infer_cpu
@@ -105,8 +141,10 @@
 title: Debugging
 - local: hpo_train
 title: Hyperparameter Search using Trainer API
-title: Performance and scalability
-- sections:
+- local: tf_xla
+title: XLA Integration for TensorFlow Models
+title: Performance and scalability
+- sections:
 - local: contributing
 title: How to contribute to transformers?
 - local: add_new_model
@@ -119,36 +157,36 @@
 title: Testing
 - local: pr_checks
 title: Checks on a Pull Request
 title: Contribute
-- local: notebooks
-title: 🤗 Transformers Notebooks
-- local: community
-title: Community resources
-- local: benchmarks
-title: Benchmarks
-- local: migration
-title: Migrating from previous packages
-title: How-to guides
 - sections:
 - local: philosophy
 title: Philosophy
 - local: glossary
 title: Glossary
 - local: task_summary
-title: Summary of the tasks
+title: What 🤗 Transformers can do
+- local: tasks_explained
+title: How 🤗 Transformers solve tasks
 - local: model_summary
-title: Summary of the models
+title: The Transformer model family
 - local: tokenizer_summary
 title: Summary of the tokenizers
+- local: attention
+title: Attention mechanisms
 - local: pad_truncation
 title: Padding and truncation
 - local: bertology
 title: BERTology
 - local: perplexity
 title: Perplexity of fixed-length models
+- local: pipeline_webserver
+title: Pipelines for webserver inference
 title: Conceptual guides
 - sections:
 - sections:
+- local: main_classes/agent
+title: Agents and Tools
 - local: model_doc/auto
 title: Auto Classes
 - local: main_classes/callback
@@ -175,6 +213,8 @@
 title: Pipelines
 - local: main_classes/processors
 title: Processors
+- local: main_classes/quantization
+title: Quantization
 - local: main_classes/tokenizer
 title: Tokenizer
 - local: main_classes/trainer
@@ -183,6 +223,8 @@
 title: DeepSpeed Integration
 - local: main_classes/feature_extractor
 title: Feature Extractor
+- local: main_classes/image_processor
+title: Image Processor
 title: Main Classes
 - sections:
 - isExpanded: false
@@ -207,6 +249,8 @@
 title: BigBird
 - local: model_doc/bigbird_pegasus
 title: BigBirdPegasus
+- local: model_doc/biogpt
+title: BioGpt
 - local: model_doc/blenderbot
 title: Blenderbot
 - local: model_doc/blenderbot-small
@@ -227,6 +271,8 @@
 title: ConvBERT
 - local: model_doc/cpm
 title: CPM
+- local: model_doc/cpmant
+title: CPMANT
 - local: model_doc/ctrl
 title: CTRL
 - local: model_doc/deberta
@@ -245,8 +291,14 @@
 title: Encoder Decoder Models
 - local: model_doc/ernie
 title: ERNIE
+- local: model_doc/ernie_m
+title: ErnieM
 - local: model_doc/esm
 title: ESM
+- local: model_doc/flan-t5
+title: FLAN-T5
+- local: model_doc/flan-ul2
+title: FLAN-UL2
 - local: model_doc/flaubert
 title: FlauBERT
 - local: model_doc/fnet
@@ -267,16 +319,24 @@
 title: GPT-J
 - local: model_doc/gpt2
 title: GPT2
+- local: model_doc/gpt_bigcode
+title: GPTBigCode
+- local: model_doc/gptsan-japanese
+title: GPTSAN Japanese
+- local: model_doc/gpt-sw3
+title: GPTSw3
 - local: model_doc/herbert
 title: HerBERT
 - local: model_doc/ibert
 title: I-BERT
-- local: model_doc/layoutlm
-title: LayoutLM
+- local: model_doc/jukebox
+title: Jukebox
 - local: model_doc/led
 title: LED
-- local: model_doc/lilt
-title: LiLT
+- local: model_doc/llama
+title: LLaMA
+- local: model_doc/llama2
+title: Llama2
 - local: model_doc/longformer
 title: Longformer
 - local: model_doc/longt5
@@ -291,6 +351,8 @@
 title: MarkupLM
 - local: model_doc/mbart
 title: MBart and MBart-50
+- local: model_doc/mega
+title: MEGA
 - local: model_doc/megatron-bert
 title: MegatronBERT
 - local: model_doc/megatron_gpt2
@@ -301,6 +363,8 @@
 title: MobileBERT
 - local: model_doc/mpnet
 title: MPNet
+- local: model_doc/mra
+title: MRA
 - local: model_doc/mt5
 title: MT5
 - local: model_doc/mvp
@@ -309,8 +373,12 @@
 title: NEZHA
 - local: model_doc/nllb
 title: NLLB
+- local: model_doc/nllb-moe
+title: NLLB-MoE
 - local: model_doc/nystromformer
 title: Nyströmformer
+- local: model_doc/open-llama
+title: Open-Llama
 - local: model_doc/opt
 title: OPT
 - local: model_doc/pegasus
@@ -337,24 +405,34 @@
 title: RetriBERT
 - local: model_doc/roberta
 title: RoBERTa
+- local: model_doc/roberta-prelayernorm
+title: RoBERTa-PreLayerNorm
+- local: model_doc/roc_bert
+title: RoCBert
 - local: model_doc/roformer
 title: RoFormer
+- local: model_doc/rwkv
+title: RWKV
 - local: model_doc/splinter
 title: Splinter
 - local: model_doc/squeezebert
 title: SqueezeBERT
+- local: model_doc/switch_transformers
+title: SwitchTransformers
 - local: model_doc/t5
 title: T5
 - local: model_doc/t5v1.1
 title: T5v1.1
-- local: model_doc/tapas
-title: TAPAS
 - local: model_doc/tapex
 title: TAPEX
 - local: model_doc/transfo-xl
 title: Transformer XL
 - local: model_doc/ul2
 title: UL2
+- local: model_doc/umt5
+title: UMT5
+- local: model_doc/xmod
+title: X-MOD
 - local: model_doc/xglm
 title: XGLM
 - local: model_doc/xlm
@@ -365,6 +443,8 @@
 title: XLM-RoBERTa
 - local: model_doc/xlm-roberta-xl
 title: XLM-RoBERTa-XL
+- local: model_doc/xlm-v
+title: XLM-V
 - local: model_doc/xlnet
 title: XLNet
 - local: model_doc/yoso
@@ -374,32 +454,56 @@
 sections:
 - local: model_doc/beit
 title: BEiT
+- local: model_doc/bit
+title: BiT
 - local: model_doc/conditional_detr
 title: Conditional DETR
 - local: model_doc/convnext
 title: ConvNeXT
+- local: model_doc/convnextv2
+title: ConvNeXTV2
 - local: model_doc/cvt
 title: CvT
 - local: model_doc/deformable_detr
 title: Deformable DETR
 - local: model_doc/deit
 title: DeiT
+- local: model_doc/deta
+title: DETA
 - local: model_doc/detr
 title: DETR
+- local: model_doc/dinat
+title: DiNAT
 - local: model_doc/dit
 title: DiT
 - local: model_doc/dpt
 title: DPT
+- local: model_doc/efficientformer
+title: EfficientFormer
+- local: model_doc/efficientnet
+title: EfficientNet
+- local: model_doc/focalnet
+title: FocalNet
 - local: model_doc/glpn
 title: GLPN
 - local: model_doc/imagegpt
 title: ImageGPT
 - local: model_doc/levit
 title: LeViT
+- local: model_doc/mask2former
+title: Mask2Former
 - local: model_doc/maskformer
 title: MaskFormer
+- local: model_doc/mobilenet_v1
+title: MobileNetV1
+- local: model_doc/mobilenet_v2
+title: MobileNetV2
 - local: model_doc/mobilevit
 title: MobileViT
+- local: model_doc/mobilevitv2
+title: MobileViTV2
+- local: model_doc/nat
+title: NAT
 - local: model_doc/poolformer
 title: PoolFormer
 - local: model_doc/regnet
@@ -408,29 +512,55 @@
 title: ResNet
 - local: model_doc/segformer
 title: SegFormer
+- local: model_doc/swiftformer
+title: SwiftFormer
 - local: model_doc/swin
 title: Swin Transformer
 - local: model_doc/swinv2
 title: Swin Transformer V2
+- local: model_doc/swin2sr
+title: Swin2SR
+- local: model_doc/table-transformer
+title: Table Transformer
+- local: model_doc/timesformer
+title: TimeSformer
+- local: model_doc/upernet
+title: UperNet
 - local: model_doc/van
 title: VAN
 - local: model_doc/videomae
 title: VideoMAE
 - local: model_doc/vit
 title: Vision Transformer (ViT)
+- local: model_doc/vit_hybrid
+title: ViT Hybrid
 - local: model_doc/vit_mae
 title: ViTMAE
 - local: model_doc/vit_msn
 title: ViTMSN
+- local: model_doc/vivit
+title: ViViT
 - local: model_doc/yolos
 title: YOLOS
 title: Vision models
 - isExpanded: false
 sections:
+- local: model_doc/audio-spectrogram-transformer
+title: Audio Spectrogram Transformer
+- local: model_doc/bark
+title: Bark
+- local: model_doc/clap
+title: CLAP
+- local: model_doc/encodec
+title: EnCodec
 - local: model_doc/hubert
 title: Hubert
 - local: model_doc/mctct
 title: MCTCT
+- local: model_doc/mms
+title: MMS
+- local: model_doc/musicgen
+title: MusicGen
 - local: model_doc/sew
 title: SEW
 - local: model_doc/sew-d
@@ -439,6 +569,8 @@
 title: Speech2Text
 - local: model_doc/speech_to_text_2
 title: Speech2Text2
+- local: model_doc/speecht5
+title: SpeechT5
 - local: model_doc/unispeech
 title: UniSpeech
 - local: model_doc/unispeech-sat
@@ -460,32 +592,70 @@
 title: Audio models
 - isExpanded: false
 sections:
+- local: model_doc/align
+title: ALIGN
+- local: model_doc/altclip
+title: AltCLIP
+- local: model_doc/blip
+title: BLIP
+- local: model_doc/blip-2
+title: BLIP-2
+- local: model_doc/bridgetower
+title: BridgeTower
+- local: model_doc/chinese_clip
+title: Chinese-CLIP
 - local: model_doc/clip
 title: CLIP
+- local: model_doc/clipseg
+title: CLIPSeg
 - local: model_doc/data2vec
 title: Data2Vec
+- local: model_doc/deplot
+title: DePlot
 - local: model_doc/donut
 title: Donut
 - local: model_doc/flava
 title: FLAVA
+- local: model_doc/git
+title: GIT
 - local: model_doc/groupvit
 title: GroupViT
+- local: model_doc/instructblip
+title: InstructBLIP
+- local: model_doc/layoutlm
+title: LayoutLM
 - local: model_doc/layoutlmv2
 title: LayoutLMV2
 - local: model_doc/layoutlmv3
 title: LayoutLMV3
 - local: model_doc/layoutxlm
 title: LayoutXLM
+- local: model_doc/lilt
+title: LiLT
 - local: model_doc/lxmert
 title: LXMERT
+- local: model_doc/matcha
+title: MatCha
+- local: model_doc/mgp-str
+title: MGP-STR
+- local: model_doc/oneformer
+title: OneFormer
 - local: model_doc/owlvit
 title: OWL-ViT
 - local: model_doc/perceiver
 title: Perceiver
+- local: model_doc/pix2struct
+title: Pix2Struct
+- local: model_doc/sam
+title: Segment Anything
 - local: model_doc/speech-encoder-decoder
 title: Speech Encoder Decoder Models
+- local: model_doc/tapas
+title: TAPAS
 - local: model_doc/trocr
 title: TrOCR
+- local: model_doc/tvlt
+title: TVLT
 - local: model_doc/vilt
 title: ViLT
 - local: model_doc/vision-encoder-decoder
|
||||||
@ -506,9 +676,18 @@
|
|||||||
title: Reinforcement learning models
|
title: Reinforcement learning models
|
||||||
- isExpanded: false
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
|
- local: model_doc/autoformer
|
||||||
|
title: Autoformer
|
||||||
|
- local: model_doc/informer
|
||||||
|
title: Informer
|
||||||
- local: model_doc/time_series_transformer
|
- local: model_doc/time_series_transformer
|
||||||
title: Time Series Transformer
|
title: Time Series Transformer
|
||||||
title: Time series models
|
title: Time series models
|
||||||
|
- isExpanded: false
|
||||||
|
sections:
|
||||||
|
- local: model_doc/graphormer
|
||||||
|
title: Graphormer
|
||||||
|
title: Graph models
|
||||||
title: Models
|
title: Models
|
||||||
- sections:
|
- sections:
|
||||||
- local: internal/modeling_utils
|
- local: internal/modeling_utils
|
||||||
@ -523,7 +702,11 @@
|
|||||||
title: Utilities for Generation
|
title: Utilities for Generation
|
||||||
- local: internal/image_processing_utils
|
- local: internal/image_processing_utils
|
||||||
title: Utilities for Image Processors
|
title: Utilities for Image Processors
|
||||||
|
- local: internal/audio_utils
|
||||||
|
title: Utilities for Audio processing
|
||||||
- local: internal/file_utils
|
- local: internal/file_utils
|
||||||
title: General Utilities
|
title: General Utilities
|
||||||
|
- local: internal/time_series_utils
|
||||||
|
title: Utilities for Time Series
|
||||||
title: Internal Helpers
|
title: Internal Helpers
|
||||||
title: API
|
title: API
|
||||||
|
|||||||
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Distributed training with 🤗 Accelerate
@@ -7,36 +7,34 @@ http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # How to add a model to 🤗 Transformers?
 
-Adding a new model is often difficult and requires an in-depth knowledge of the 🤗 Transformers library and ideally also
-of the model's original repository. At Hugging Face, we are trying to empower the community more and more to add models
-independently. Thus, for some new models that the community wants to be added to 🤗 Transformers, we create a customized
-*call-for-model-addition* that explains step-by-step how to add the requested model. With this
-*call-for-model-addition*, we want to teach a motivated and experienced contributor of the community how to port a
-model to 🤗 Transformers.
-
-If this sounds like something you would be interested in, feel free to check out the currently open
-"calls-for-model-addition" [here](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model/open_model_proposals/README.md)
-and to contact us.
-
-If selected, you will then work closely with one member of the Hugging Face team to integrate the model into 🤗
-Transformers. By doing so, you will both gain a theoretical and deep practical understanding of the proposed model. But
-more importantly, you will have made a major open-source contribution to 🤗 Transformers. Along the way, you will:
-
-- get insights into open-source best practices
-- understand the design principles of one of the most popular NLP libraries
-- learn how to do efficiently test large NLP models
-- learn how to integrate Python utilities like `black`, `isort`, `make fix-copies` into a library to always
-  ensure clean and readable code
-
-We are also more than happy if you want to add a model that cannot be found in the "calls-for-model-addition" folder.
-The following sections explain in detail how to add a new model. It might also be very helpful to check out already
-added models to see if those resemble the model you would like to add [here](https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed).
-
-To start, let's try to get a general overview of the Transformers library.
+The 🤗 Transformers library is often able to offer new models thanks to community contributors. But this can be a challenging project and requires an in-depth knowledge of the 🤗 Transformers library and the model to implement. At Hugging Face, we're trying to empower more of the community to actively add models and we've put together this guide to walk you through the process of adding a PyTorch model (make sure you have [PyTorch installed](https://pytorch.org/get-started/locally/)).
+
+<Tip>
+
+If you're interested in implementing a TensorFlow model, take a look at the [How to convert a 🤗 Transformers model to TensorFlow](add_tensorflow_model) guide!
+
+</Tip>
+
+Along the way, you'll:
+
+- get insights into open-source best practices
+- understand the design principles behind one of the most popular deep learning libraries
+- learn how to efficiently test large models
+- learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` to ensure clean and readable code
+
+A Hugging Face team member will be available to help you along the way so you'll never be alone. 🤗 ❤️
+
+To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) issue for the model you want to see in 🤗 Transformers. If you're not especially picky about contributing a specific model, you can filter by the [New model label](https://github.com/huggingface/transformers/labels/New%20model) to see if there are any unclaimed model requests and work on it.
+
+Once you've opened a new model request, the first step is to get familiar with 🤗 Transformers if you aren't already!
 
 ## General overview of 🤗 Transformers
@@ -144,20 +142,20 @@ In the following, we try to give you a general recipe that we found most useful
 The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do
 List:
 
-- 1. ☐ (Optional) Understood theoretical aspects
-- 2. ☐ Prepared transformers dev environment
-- 3. ☐ Set up debugging environment of the original repository
-- 4. ☐ Created script that successfully runs forward pass using original repository and checkpoint
-- 5. ☐ Successfully added the model skeleton to Transformers
-- 6. ☐ Successfully converted original checkpoint to Transformers checkpoint
-- 7. ☐ Successfully ran forward pass in Transformers that gives identical output to original checkpoint
-- 8. ☐ Finished model tests in Transformers
-- 9. ☐ Successfully added Tokenizer in Transformers
-- 10. ☐ Run end-to-end integration tests
-- 11. ☐ Finished docs
-- 12. ☐ Uploaded model weights to the hub
-- 13. ☐ Submitted the pull request
-- 14. ☐ (Optional) Added a demo notebook
+☐ (Optional) Understood the model's theoretical aspects<br>
+☐ Prepared 🤗 Transformers dev environment<br>
+☐ Set up debugging environment of the original repository<br>
+☐ Created script that successfully runs the `forward()` pass using the original repository and checkpoint<br>
+☐ Successfully added the model skeleton to 🤗 Transformers<br>
+☐ Successfully converted original checkpoint to 🤗 Transformers checkpoint<br>
+☐ Successfully ran `forward()` pass in 🤗 Transformers that gives identical output to original checkpoint<br>
+☐ Finished model tests in 🤗 Transformers<br>
+☐ Successfully added tokenizer in 🤗 Transformers<br>
+☐ Run end-to-end integration tests<br>
+☐ Finished docs<br>
+☐ Uploaded model weights to the Hub<br>
+☐ Submitted the pull request<br>
+☐ (Optional) Added a demo notebook
 
 To begin with, we usually recommend starting by getting a good theoretical understanding of `BrandNewBert`. However,
 if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive
@@ -208,7 +206,15 @@ source .env/bin/activate
 pip install -e ".[dev]"
 ```
 
-and return to the parent directory
+Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a
+failure with this command. If that's the case make sure to install the Deep Learning framework you are working with
+(PyTorch, TensorFlow and/or Flax) then do:
+
+```bash
+pip install -e ".[quality]"
+```
+
+which should be enough for most use cases. You can then return to the parent directory
 
 ```bash
 cd ..
@@ -274,7 +280,7 @@ In general, there are two possible debugging environments for running the original model
 
 Jupyter notebooks have the advantage that they allow for cell-by-cell execution which can be helpful to better split
 logical components from one another and to have faster debugging cycles as intermediate results can be stored. Also,
 notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging
-Face team for help. If you are familiar with Jupiter notebooks, we strongly recommend you to work with them.
+Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them.
 
 The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them you will have to spend
 some time adjusting to the new programming environment and that you might not be able to use your known debugging tools
@@ -498,6 +504,48 @@ model = BrandNewBertModel(BrandNewBertConfig())
 The above command will create a model according to the default parameters as defined in `BrandNewBertConfig()` with
 random weights, thus making sure that the `init()` methods of all components work.
 
+Note that all random initialization should happen in the `_init_weights` method of your `BrandNewBertPreTrainedModel`
+class. It should initialize all leaf modules depending on the variables of the config. Here is an example with the
+BERT `_init_weights` method:
+
+```py
+def _init_weights(self, module):
+    """Initialize the weights"""
+    if isinstance(module, nn.Linear):
+        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        if module.bias is not None:
+            module.bias.data.zero_()
+    elif isinstance(module, nn.Embedding):
+        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        if module.padding_idx is not None:
+            module.weight.data[module.padding_idx].zero_()
+    elif isinstance(module, nn.LayerNorm):
+        module.bias.data.zero_()
+        module.weight.data.fill_(1.0)
+```
+
+You can have some more custom schemes if you need a special initialization for some modules. For instance, in
+`Wav2Vec2ForPreTraining`, the last two linear layers need to have the initialization of the regular PyTorch `nn.Linear`
+but all the other ones should use an initialization as above. This is coded like this:
+
+```py
+def _init_weights(self, module):
+    """Initialize the weights"""
+    if isinstance(module, Wav2Vec2ForPreTraining):
+        module.project_hid.reset_parameters()
+        module.project_q.reset_parameters()
+        module.project_hid._is_hf_initialized = True
+        module.project_q._is_hf_initialized = True
+    elif isinstance(module, nn.Linear):
+        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        if module.bias is not None:
+            module.bias.data.zero_()
+```
+
+The `_is_hf_initialized` flag is used internally to make sure we only initialize a submodule once. By setting it to
+`True` for `module.project_q` and `module.project_hid`, we make sure the custom initialization we did is not overridden
+later on, as the `_init_weights` function won't be applied to them.
 **6. Write a conversion script**
 
 Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in
@@ -634,7 +682,7 @@ model.save_pretrained("/path/to/converted/checkpoint/folder")
 **7. Implement the forward pass**
 
 Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make
-sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward
+sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#34-run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward
 pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers
 implementation instead of the original one. It should look as follows:
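As a rough sketch of such a script (the checkpoint path and token ids below are placeholders, and `BrandNewBertModel` is the guide's stand-in class, so this is illustrative rather than runnable as-is):

```python
import torch

from transformers import BrandNewBertModel  # the guide's placeholder class

model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])  # dummy token ids
output = model(input_ids).last_hidden_state
```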
@@ -773,7 +821,7 @@ tests for you.
 
 Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is
 a nice docstring and a doc page. The Cookiecutter should have added a template file called
-`docs/source/model_doc/brand_new_bert.rst` that you should fill out. Users of your model will usually first look at
+`docs/source/model_doc/brand_new_bert.md` that you should fill out. Users of your model will usually first look at
 this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for
 the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team
 regarding the docstrings.
@@ -7,12 +7,16 @@ http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # How to create a custom pipeline?
 
-In this guide, we will see how to create a custom pipeline and share it on the [Hub](hf.co/models) or add it to the
-Transformers library.
+In this guide, we will see how to create a custom pipeline and share it on the [Hub](hf.co/models) or add it to the
+🤗 Transformers library.
 
 First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes,
 dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible
@@ -22,8 +26,8 @@ pipeline (`preprocess`).
 Then define the `outputs`. Same policy as the `inputs`. The simpler, the better. Those will be the outputs of
 the `postprocess` method.
 
-Start by inheriting the base class `Pipeline`. with the 4 methods needed to implement `preprocess`,
-`_forward`, `postprocess` and `_sanitize_parameters`.
+Start by inheriting the base class `Pipeline` with the 4 methods needed to implement `preprocess`,
+`_forward`, `postprocess`, and `_sanitize_parameters`.
 
 ```python
@@ -62,14 +66,14 @@ contain more information and is usually a `Dict`.
 called method as it contains safeguards to make sure everything is working on the expected device. If anything is
 linked to a real model it belongs in the `_forward` method, anything else is in the preprocess/postprocess.
 
-`postprocess` methods will take the output of `_forward` and turn it into the final output that were decided
+The `postprocess` method will take the output of `_forward` and turn it into the final output that was decided
 earlier.
 
 `_sanitize_parameters` exists to allow users to pass any parameters whenever they wish, be it at initialization
 time `pipeline(...., maybe_arg=4)` or at call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.
 
-The returns of `_sanitize_parameters` are the 3 dicts of kwargs that will be passed directly to `preprocess`,
-`_forward` and `postprocess`. Don't fill anything if the caller didn't call with any extra parameter. That
+The returns of `_sanitize_parameters` are the 3 dicts of kwargs that will be passed directly to `preprocess`,
+`_forward`, and `postprocess`. Don't fill anything if the caller didn't call with any extra parameter. That
 allows to keep the default arguments in the function definition which is always more "natural".
 
 A classic example would be a `top_k` argument in the post processing in classification tasks.
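As an illustration, here is a minimal sketch of routing such a `top_k` argument to `postprocess` while leaving the other two stages untouched (the class and its defaults are hypothetical, not the guide's exact example):

```python
from transformers import Pipeline


class MyClassificationPipeline(Pipeline):  # hypothetical example class
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "top_k" in kwargs:
            # Only forward the argument if the caller passed it, so the
            # default value in `postprocess` applies otherwise.
            postprocess_kwargs["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_kwargs

    def preprocess(self, inputs):
        return self.tokenizer(inputs, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, top_k=5):
        probs = model_outputs.logits.softmax(-1)[0]
        top_k = min(top_k, probs.shape[-1])  # guard against models with few labels
        scores, ids = probs.topk(top_k)
        return [
            {"label": self.model.config.id2label[i.item()], "score": s.item()}
            for s, i in zip(scores, ids)
        ]
```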
@@ -126,7 +130,7 @@ PIPELINE_REGISTRY.register_pipeline(
 )
 ```
 
-You can specify a default model if you want, in which case it should come with a specific revision (which can be the name of a branch or a commit hash, here we took `"abcdef"`) as well was the type:
+You can specify a default model if you want, in which case it should come with a specific revision (which can be the name of a branch or a commit hash, here we took `"abcdef"`) as well as the type:
 
 ```python
 PIPELINE_REGISTRY.register_pipeline(
@@ -225,9 +229,9 @@ from transformers import pipeline
 classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True)
 ```
 
-## Add the pipeline to Transformers
+## Add the pipeline to 🤗 Transformers
 
-If you want to contribute your pipeline to Transformers, you will need to add a new module in the `pipelines` submodule
+If you want to contribute your pipeline to 🤗 Transformers, you will need to add a new module in the `pipelines` submodule
 with the code of your pipeline, then add it in the list of tasks defined in `pipelines/__init__.py`.
 
 Then you will need to add tests. Create a new file `tests/test_pipelines_MY_PIPELINE.py` with examples based on the other tests.
@@ -237,7 +241,7 @@ architecture as defined by `model_mapping` and `tf_model_mapping`.
 
 This is very important to test future compatibility, meaning if someone adds a new model for
 `XXXForQuestionAnswering` then the pipeline test will attempt to run on it. Because the models are random it's
-impossible to check for actual values, that's why There is a helper `ANY` that will simply attempt to match the
+impossible to check for actual values, that's why there is a helper `ANY` that will simply attempt to match the
 output of the pipeline TYPE.
 
 You also *need* to implement 2 (ideally 4) tests.
@@ -248,7 +252,7 @@ You also *need* to implement 2 (ideally 4) tests.
 and test the pipeline outputs. The results should be the same as `test_small_model_pt`.
 - `test_large_model_pt` (`optional`): Tests the pipeline on a real pipeline where the results are supposed to
   make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
-  sure there is no drift in future releases
+  sure there is no drift in future releases.
 - `test_large_model_tf` (`optional`): Tests the pipeline on a real pipeline where the results are supposed to
   make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
-  sure there is no drift in future releases
+  sure there is no drift in future releases.
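To make the expectations above concrete, here is a hedged sketch of the two required tests (the task and checkpoint names are illustrative, not real registry entries):

```python
import unittest

from transformers import pipeline
from transformers.testing_utils import slow


class MyPipelineTests(unittest.TestCase):
    def test_small_model_pt(self):
        # Tiny random model: only assert on structure/types, never on values.
        pipe = pipeline("my-new-task", model="hf-internal-testing/tiny-random-bert")
        outputs = pipe("some input")
        self.assertIsInstance(outputs, list)

    @slow
    def test_large_model_pt(self):
        # Real checkpoint: outputs should make sense and stay stable across releases.
        pipe = pipeline("my-new-task", model="my-org/my-real-checkpoint")
        outputs = pipe("some input")
        self.assertEqual(outputs[0]["label"], "EXPECTED_LABEL")
```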
@@ -7,6 +7,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # How to convert a 🤗 Transformers model to TensorFlow?
@@ -119,6 +123,13 @@ source .env/bin/activate
 pip install -e ".[dev]"
 ```
 
+Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a
+failure with this command. If that's the case make sure to install TensorFlow then do:
+
+```bash
+pip install -e ".[quality]"
+```
+
 **Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.
 
 4. Create a branch with a descriptive name from your main branch
@@ -179,7 +190,7 @@ Now it's time to finally start coding. Our suggested starting point is the PyTorch file itself
 `modeling_brand_new_bert.py` inside `src/transformers/models/brand_new_bert/` into
 `modeling_tf_brand_new_bert.py`. The goal of this section is to modify the file and update the import structure of
 🤗 Transformers such that you can import `TFBrandNewBert` and
-`TFBrandNewBert.from_pretrained(model_repo, from_pt=True)` sucessfully loads a working TensorFlow *BrandNewBert* model.
+`TFBrandNewBert.from_pretrained(model_repo, from_pt=True)` successfully loads a working TensorFlow *BrandNewBert* model.
 
 Sadly, there is no prescription to convert a PyTorch model into TensorFlow. You can, however, follow our selection of
 tips to make the process as smooth as possible:
@@ -217,13 +228,13 @@ documentation pages. You can complete this part entirely following the patterns
 ([example](https://github.com/huggingface/transformers/pull/18020/files)). Here's a list of the needed manual
 changes:
 - Include all public classes of *BrandNewBert* in `src/transformers/__init__.py`
-- Add *BrandNewBert* classes to the corresponing Auto classes in `src/transformers/models/auto/modeling_tf_auto.py`
+- Add *BrandNewBert* classes to the corresponding Auto classes in `src/transformers/models/auto/modeling_tf_auto.py`
 - Include the modeling file in the documentation test file list in `utils/documentation_tests.txt`
 - Add the lazy loading classes related to *BrandNewBert* in `src/transformers/utils/dummy_tf_objects.py`
 - Update the import structures for the public classes in `src/transformers/models/brand_new_bert/__init__.py`
-- Add the documentation pointers to the public methods of *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.mdx`
-- Add yourself to the list of contributors to *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.mdx`
-- Finally, add a green tick ✅ to the TensorFlow column of *BrandNewBert* in `docs/source/en/index.mdx`
+- Add the documentation pointers to the public methods of *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.md`
+- Add yourself to the list of contributors to *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.md`
+- Finally, add a green tick ✅ to the TensorFlow column of *BrandNewBert* in `docs/source/en/index.md`
 
 When you're happy with your implementation, run the following checklist to confirm that your model architecture is
 ready:
docs/source/en/attention.md (new file, 61 lines)
@@ -0,0 +1,61 @@
+<!--Copyright 2023 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
+-->
+
+# Attention mechanisms
+
+Most transformer models use full attention in the sense that the attention matrix is square. It can be a big
+computational bottleneck when you have long texts. Longformer and Reformer are models that try to be more efficient and
+use a sparse version of the attention matrix to speed up training.
+
+## LSH attention
+
+[Reformer](#reformer) uses LSH attention. In the softmax(QK^t), only the biggest elements (in the softmax
+dimension) of the matrix QK^t are going to give useful contributions. So for each query q in Q, we can consider only
+the keys k in K that are close to q. A hash function is used to determine if q and k are close. The attention mask is
+modified to mask the current token (except at the first position), because it would yield a query and a key that are
+equal (and thus very similar to each other). Since the hash can be a bit random, several hash functions are used in
+practice (determined by an `n_rounds` parameter) and then averaged together.
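A toy sketch of the bucketing idea (an illustration of angular LSH in general, not Reformer's actual implementation): queries and keys that land in the same bucket under a shared random projection are the only pairs attention considers.

```python
import torch

def lsh_buckets(x, n_buckets=16, n_rounds=4, seed=0):
    """Assign each position a bucket id per hashing round; x: (seq_len, d)."""
    torch.manual_seed(seed)
    d = x.shape[-1]
    buckets = []
    for _ in range(n_rounds):
        r = torch.randn(d, n_buckets // 2)               # one random rotation per round
        proj = x @ r                                     # (seq_len, n_buckets // 2)
        h = torch.cat([proj, -proj], dim=-1).argmax(dim=-1)
        buckets.append(h)
    return torch.stack(buckets)                          # (n_rounds, seq_len)

qk = torch.randn(128, 64)      # shared query/key vectors, as in Reformer
print(lsh_buckets(qk).shape)   # torch.Size([4, 128])
```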
+## Local attention
+
+[Longformer](#longformer) uses local attention: often, the local context (e.g., what are the two tokens to the
+left and right?) is enough to take action for a given token. Also, by stacking attention layers that have a small
+window, the last layer will have a receptive field of more than just the tokens in the window, allowing them to build a
+representation of the whole sentence.
+
+Some preselected input tokens are also given global attention: for those few tokens, the attention matrix can access
+all tokens, and this process is symmetric: all other tokens have access to those specific tokens (on top of the ones in
+their local window). This is shown in Figure 2d of the paper, see below for a sample attention mask:
+
+<div class="flex justify-center">
+    <img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"/>
+</div>
+
+Using those attention matrices with fewer parameters then allows the model to accept inputs with a longer sequence
+length.
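As a toy illustration of such a mask (the window size and global positions here are made up, not Longformer's defaults):

```python
import torch

seq_len, window = 8, 1                             # each token sees itself ± 1 neighbor
i = torch.arange(seq_len)
mask = (i[:, None] - i[None, :]).abs() <= window   # banded local window

global_tokens = torch.tensor([0])                  # e.g. a [CLS]-like token
mask[global_tokens, :] = True                      # global tokens attend to everything
mask[:, global_tokens] = True                      # and everything attends to them
print(mask.int())
```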
+
+## Other tricks
+
+### Axial positional encodings
+
+[Reformer](#reformer) uses axial positional encodings: in traditional transformer models, the positional encoding
+E is a matrix of size \\(l\\) by \\(d\\), \\(l\\) being the sequence length and \\(d\\) the dimension of the
+hidden state. If you have very long texts, this matrix can be huge and take way too much space on the GPU. To alleviate
+that, axial positional encodings consist of factorizing that big matrix E into two smaller matrices E1 and E2, with
+dimensions \\(l_{1} \times d_{1}\\) and \\(l_{2} \times d_{2}\\), such that \\(l_{1} \times l_{2} = l\\) and
+\\(d_{1} + d_{2} = d\\) (with the product for the lengths, this ends up being way smaller). The embedding for time
+step \\(j\\) in E is obtained by concatenating the embeddings for timestep \\(j \% l1\\) in E1 and \\(j // l1\\)
+in E2.
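A minimal sketch of this factorization (the sizes below are illustrative, not Reformer's defaults):

```python
import torch

l1, l2, d1, d2 = 64, 128, 256, 256   # so l = l1 * l2 = 8192 and d = d1 + d2 = 512
E1 = torch.randn(l1, d1)
E2 = torch.randn(l2, d2)

def axial_embedding(j):
    # Embedding for timestep j: concatenate E1[j % l1] and E2[j // l1].
    return torch.cat([E1[j % l1], E2[j // l1]])

print(axial_embedding(5000).shape)   # torch.Size([512])
# Parameters stored: l1*d1 + l2*d2 = 49,152 instead of l*d = 4,194,304.
```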
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Load pretrained instances with an AutoClass
@@ -23,6 +27,7 @@ Remember, architecture refers to the skeleton of the model and checkpoints are the weights
 In this tutorial, learn to:
 
 * Load a pretrained tokenizer.
+* Load a pretrained image processor.
 * Load a pretrained feature extractor.
 * Load a pretrained processor.
 * Load a pretrained model.
@@ -49,9 +54,20 @@ Then tokenize your input as shown below:
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
 ```
 
+## AutoImageProcessor
+
+For vision tasks, an image processor processes the image into the correct input format.
+
+```py
+>>> from transformers import AutoImageProcessor
+
+>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
+```
+
 ## AutoFeatureExtractor
 
-For audio and vision tasks, a feature extractor processes the audio signal or image into the correct input format.
+For audio tasks, a feature extractor processes the audio signal into the correct input format.
 
 Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:
 
@@ -65,7 +81,7 @@ Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:
 
 ## AutoProcessor
 
-Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires a feature extractor to handle images and a tokenizer to handle text; a processor combines both of them.
+Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires an image processor to handle images and a tokenizer to handle text; a processor combines both of them.
 
 Load a processor with [`AutoProcessor.from_pretrained`]:
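For example, with a checkpoint from the LayoutLMv2 family mentioned above:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
```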
@@ -103,7 +119,7 @@ TensorFlow and Flax checkpoints are not affected, and can be loaded within PyTorch
 
 </Tip>
 
-Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning.
+Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning.
 </pt>
 <tf>
 Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]:
@@ -122,6 +138,6 @@ Easily reuse the same checkpoint to load an architecture for a different task:
 >>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased")
 ```
 
-Generally, we recommend using the `AutoTokenizer` class and the `TFAutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning.
+Generally, we recommend using the `AutoTokenizer` class and the `TFAutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning.
 </tf>
 </frameworkcontent>
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Benchmarks
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # BERTology
@@ -21,6 +25,7 @@ There is a growing field of study concerned with investigating the inner workings
 - Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
 - What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D.
   Manning: https://arxiv.org/abs/1906.04341
+- CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633
 
 In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to
 help people access the inner representations, mainly adapted from the great work of Paul Michel
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Instantiating a big model
@@ -72,7 +76,7 @@ On top of the configuration of the model, we see three different weights files, and an index
 
 The main advantage of doing this for big models is that during step 2 of the workflow shown above, each shard of the checkpoint is loaded after the previous one, capping the memory usage in RAM to the model size plus the size of the biggest shard.
 
-Beind the scenes, the index file is used to determine which keys are in the checkpoint, and where the corresponding weights are stored. We can load that index like any json and get a dictionary:
+Behind the scenes, the index file is used to determine which keys are in the checkpoint, and where the corresponding weights are stored. We can load that index like any json and get a dictionary:
 
 ```py
 >>> import json
@@ -86,7 +90,7 @@
 dict_keys(['metadata', 'weight_map'])
 ```
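The `weight_map` itself is a plain mapping from parameter name to the shard file that holds it; as a sketch (the key below is illustrative and depends on the model, and the file name follows the usual sharding convention):

```py
>>> index["weight_map"]["embeddings.word_embeddings.weight"]  # illustrative key
'pytorch_model-00001-of-00003.bin'
```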
 
-The metadata just consists of the total size of the model for now. We plan to add several other informations in the future:
+The metadata just consists of the total size of the model for now. We plan to add other information in the future:
 
 ```py
 >>> index["metadata"]
@@ -1,4 +1,8 @@
-# Community
+<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+-->
+
+# Community
 
 This page regroups resources around 🤗 Transformers developed by the community.
 
@@ -18,7 +22,7 @@ This page regroups resources around 🤗 Transformers developed by the community
 | [Fine-tune T5 for Classification and Multiple Choice](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | How to fine-tune T5 for classification and multiple choice tasks using a text-to-text format with PyTorch Lightning | [Suraj Patil](https://github.com/patil-suraj) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) |
 | [Fine-tune DialoGPT on New Datasets and Languages](https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots | [Nathan Cooper](https://github.com/ncoop57) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) |
 | [Long Sequence Modeling with Reformer](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | How to train on sequences as long as 500,000 tokens with Reformer | [Patrick von Platen](https://github.com/patrickvonplaten) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) |
-| [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | How to fine-tune BART for summarization with fastai using blurr | [Wayde Gilliam](https://ohmeow.com/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) |
+| [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/posts/2021-05-25-mbart-sequence-classification-with-blurr.ipynb) | How to fine-tune BART for summarization with fastai using blurr | [Wayde Gilliam](https://ohmeow.com/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/posts/2021-05-25-mbart-sequence-classification-with-blurr.ipynb) |
 | [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model | [Boris Dayma](https://github.com/borisdayma) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) |
 | [Optimize 🤗 Hugging Face models with Weights & Biases](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | A complete tutorial showcasing W&B integration with Hugging Face | [Boris Dayma](https://github.com/borisdayma) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) |
 | [Pretrain Longformer](https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | How to build a "long" version of existing pretrained models | [Iz Beltagy](https://beltagy.net) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) |
@@ -1,162 +0,0 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Converting From Tensorflow Checkpoints

A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models
that can be loaded using the `from_pretrained` methods of the library.

<Tip>

Since version 2.3.0 the conversion script is part of the transformers CLI (**transformers-cli**), available in any
transformers >= 2.3.0 installation.

The documentation below reflects the **transformers-cli convert** command format.

</Tip>

## BERT

You can convert any TensorFlow checkpoint for BERT (in particular [the pre-trained models released by Google](https://github.com/google-research/bert#pre-trained-models)) into a PyTorch save file by using the
[convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py) script.

This CLI takes as input a TensorFlow checkpoint (three files starting with `bert_model.ckpt`) and the associated
configuration file (`bert_config.json`), creates a PyTorch model for this configuration, loads the weights from
the TensorFlow checkpoint into the PyTorch model, and saves the resulting model in a standard PyTorch save file that can
be imported using `from_pretrained()` (see example in [quicktour](quicktour), [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py)).

You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
checkpoint (the three files starting with `bert_model.ckpt`) but be sure to keep the configuration file
(`bert_config.json`) and the vocabulary file (`vocab.txt`) as these are needed for the PyTorch model too.

To run this specific conversion script you will need to have TensorFlow and PyTorch installed (`pip install tensorflow`). The rest of the repository only requires PyTorch.

Here is an example of the conversion process for a pre-trained `BERT-Base Uncased` model:

```bash
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

transformers-cli convert --model_type bert \
  --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
  --config $BERT_BASE_DIR/bert_config.json \
  --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/bert#pre-trained-models).
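
Once converted, the checkpoint can be loaded like any other PyTorch model. A minimal sketch, assuming the conversion above was run and `bert_config.json` was kept next to the converted weights (the directory path is a placeholder):

```py
>>> from transformers import BertConfig, BertModel

>>> # Load the configuration that was kept alongside the converted weights
>>> config = BertConfig.from_json_file("/path/to/bert/uncased_L-12_H-768_A-12/bert_config.json")

>>> # Point from_pretrained at the converted weights file, passing the config explicitly
>>> model = BertModel.from_pretrained(
...     "/path/to/bert/uncased_L-12_H-768_A-12/pytorch_model.bin", config=config
... )
```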

## ALBERT

Convert TensorFlow model checkpoints of ALBERT to PyTorch using the
[convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py) script.

The CLI takes as input a TensorFlow checkpoint (three files starting with `model.ckpt-best`) and the accompanying
configuration file (`albert_config.json`), then creates and saves a PyTorch model. To run this conversion you will
need to have TensorFlow and PyTorch installed.

Here is an example of the conversion process for the pre-trained `ALBERT Base` model:

```bash
export ALBERT_BASE_DIR=/path/to/albert/albert_base

transformers-cli convert --model_type albert \
  --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
  --config $ALBERT_BASE_DIR/albert_config.json \
  --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/albert#pre-trained-models).
## OpenAI GPT

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint
is saved in the same format as the OpenAI pretrained model (see [here](https://github.com/openai/finetune-transformer-lm)):

```bash
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

transformers-cli convert --model_type gpt \
  --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT_CONFIG] \
  [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]
```
## OpenAI GPT-2

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see [here](https://github.com/openai/gpt-2)):

```bash
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights

transformers-cli convert --model_type gpt2 \
  --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT2_CONFIG] \
  [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
```
## Transformer-XL

Here is an example of the conversion process for a pre-trained Transformer-XL model (see [here](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models)):

```bash
export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint

transformers-cli convert --model_type transfo_xl \
  --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config TRANSFO_XL_CONFIG] \
  [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]
```
## XLNet

Here is an example of the conversion process for a pre-trained XLNet model:

```bash
export XLNET_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export XLNET_CONFIG_PATH=/path/to/xlnet/config

transformers-cli convert --model_type xlnet \
  --tf_checkpoint $XLNET_CHECKPOINT_PATH \
  --config $XLNET_CONFIG_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--finetuning_task_name XLNET_FINETUNED_TASK]
```
## XLM

Here is an example of the conversion process for a pre-trained XLM model:

```bash
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

transformers-cli convert --model_type xlm \
  --tf_checkpoint $XLM_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config XLM_CONFIG] \
  [--finetuning_task_name XLM_FINETUNED_TASK]
```
## T5

Here is an example of the conversion process for a pre-trained T5 model:

```bash
export T5=/path/to/t5/uncased_L-12_H-768_A-12

transformers-cli convert --model_type t5 \
  --tf_checkpoint $T5/t5_model.ckpt \
  --config $T5/t5_config.json \
  --pytorch_dump_output $T5/pytorch_model.bin
```
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Create a custom architecture
@@ -17,7 +21,8 @@ An [`AutoClass`](model_doc/auto) automatically infers the model architecture and
- Load and customize a model configuration.
- Create a model architecture.
- Create a slow and fast tokenizer for text.
- Create an image processor for vision tasks.
- Create a feature extractor for audio tasks.
- Create a processor for multimodal tasks.

## Configuration
@@ -94,7 +99,7 @@ Once you are satisfied with your model configuration, you can save it with [`~Pr
To reuse the configuration file, load it with [`~PretrainedConfig.from_pretrained`]:

```py
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
```

<Tip>
@@ -114,7 +119,7 @@ Load your custom configuration attributes into the model:
```py
>>> from transformers import DistilBertModel

>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
>>> model = DistilBertModel(my_config)
```

@@ -244,21 +249,21 @@ By default, [`AutoTokenizer`] will try to load a fast tokenizer. You can disable
</Tip>

## Image Processor

An image processor processes vision inputs. It inherits from the base [`~image_processing_utils.ImageProcessingMixin`] class.

To use, create an image processor associated with the model you're using. For example, create a default [`ViTImageProcessor`] if you are using [ViT](model_doc/vit) for image classification:

```py
>>> from transformers import ViTImageProcessor

>>> vit_extractor = ViTImageProcessor()
>>> print(vit_extractor)
ViTImageProcessor {
  "do_normalize": true,
  "do_resize": true,
  "image_processor_type": "ViTImageProcessor",
  "image_mean": [
    0.5,
    0.5,
@@ -276,21 +281,21 @@ ViTFeatureExtractor {
<Tip>

If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default image processor parameters.

</Tip>

Modify any of the [`ViTImageProcessor`] parameters to create your custom image processor:

```py
>>> from transformers import ViTImageProcessor

>>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3])
>>> print(my_vit_extractor)
ViTImageProcessor {
  "do_normalize": false,
  "do_resize": true,
  "image_processor_type": "ViTImageProcessor",
  "image_mean": [
    0.3,
    0.3,
@@ -306,7 +311,11 @@ ViTFeatureExtractor {
}
```

## Feature Extractor

A feature extractor processes audio inputs. It inherits from the base [`~feature_extraction_utils.FeatureExtractionMixin`] class, and may also inherit from the [`SequenceFeatureExtractor`] class for processing audio inputs.

To use, create a feature extractor associated with the model you're using. For example, create a default [`Wav2Vec2FeatureExtractor`] if you are using [Wav2Vec2](model_doc/wav2vec2) for audio classification:

```py
>>> from transformers import Wav2Vec2FeatureExtractor
@@ -324,9 +333,34 @@ Wav2Vec2FeatureExtractor {
}
```

<Tip>

If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default feature extractor parameters.

</Tip>

Modify any of the [`Wav2Vec2FeatureExtractor`] parameters to create your custom feature extractor:

```py
>>> from transformers import Wav2Vec2FeatureExtractor

>>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False)
>>> print(w2v2_extractor)
Wav2Vec2FeatureExtractor {
  "do_normalize": false,
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": false,
  "sampling_rate": 8000
}
```

## Processor

For models that support multimodal tasks, 🤗 Transformers offers a processor class that conveniently wraps processing classes such as a feature extractor and a tokenizer into a single object. For example, let's use the [`Wav2Vec2Processor`] for an automatic speech recognition task (ASR). ASR transcribes audio to text, so you will need a feature extractor and a tokenizer.

Create a feature extractor to handle the audio inputs:

@@ -352,4 +386,4 @@ Combine the feature extractor and tokenizer in [`Wav2Vec2Processor`]:

>>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
```
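
For reference, a minimal end-to-end sketch of this ASR preprocessing setup (the sampling rate and the vocabulary file name are illustrative placeholders):

```py
>>> from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor

>>> # Feature extractor for the raw audio inputs
>>> feature_extractor = Wav2Vec2FeatureExtractor(sampling_rate=16000, do_normalize=True)

>>> # Tokenizer for the text transcriptions
>>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt")

>>> # Wrap both in a single processor object
>>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
```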

With two basic classes - configuration and model - and an additional preprocessing class (tokenizer, image processor, feature extractor, or processor), you can create any of the models supported by 🤗 Transformers. Each of these base classes is configurable, allowing you to use the specific attributes you want. You can easily set up a model for training or modify an existing pretrained model to fine-tune.
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Sharing custom models
@@ -21,7 +25,7 @@ with the community (with the code it relies on) so that anyone can use it, even
Transformers library.

We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the
[timm library](https://github.com/rwightman/pytorch-image-models) into a [`PreTrainedModel`].

## Writing a custom configuration
@@ -55,9 +59,9 @@ class ResnetConfig(PretrainedConfig):
        **kwargs,
    ):
        if block_type not in ["basic", "bottleneck"]:
            raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
        if stem_type not in ["", "deep", "deep-tiered"]:
            raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")

        self.block_type = block_type
        self.layers = layers
@@ -146,6 +150,9 @@ class ResnetModel(PreTrainedModel):
For the model that will classify images, we just change the forward method:

```py
import torch


class ResnetModelForImageClassification(PreTrainedModel):
    config_class = ResnetConfig
docs/source/en/custom_tools.md (new file, 789 lines)
@@ -0,0 +1,789 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Custom Tools and Prompts

<Tip>

If you are not aware of what tools and agents are in the context of transformers, we recommend you read the
[Transformers Agents](transformers_agents) page first.

</Tip>

<Tip warning={true}>

Transformers Agent is an experimental API that is subject to change at any time. Results returned by the agents
can vary as the APIs or underlying models are prone to change.

</Tip>

Creating and using custom tools and prompts is paramount to empowering the agent and having it perform new tasks.
In this guide we'll take a look at:

- How to customize the prompt
- How to use custom tools
- How to create custom tools

## Customizing the prompt

As explained in [Transformers Agents](transformers_agents), agents can run in [`~Agent.run`] and [`~Agent.chat`] mode.
Both the `run` and `chat` modes rely on the same logic. The language model powering the agent is conditioned on a long
prompt and completes the prompt by generating the next tokens until the stop token is reached.
The only difference between the two modes is that during the `chat` mode the prompt is extended with
previous user inputs and model generations. This allows the agent to have access to past interactions,
seemingly giving the agent some kind of memory.

### Structure of the prompt

Let's take a closer look at how the prompt is structured to understand how it can be best customized.
The prompt is structured broadly into four parts:
- 1. Introduction: how the agent should behave, explanation of the concept of tools.
- 2. Description of all the tools. This is defined by a `<<all_tools>>` token that is dynamically replaced at runtime with the tools defined/chosen by the user.
- 3. A set of examples of tasks and their solutions.
- 4. The current example, and the request for a solution.

To better understand each part, let's look at a shortened version of what the `run` prompt can look like:

````text
I will ask you to perform a task, your job is to come up with a series of simple commands in Python that will perform the task.
[...]
You can print intermediate results if it makes sense to do so.

Tools:
- document_qa: This is a tool that answers a question about a document (pdf). It takes an input named `document` which should be the document containing the information, as well as a `question` that is the question about the document. It returns a text that contains the answer to the question.
- image_captioner: This is a tool that generates a description of an image. It takes an input named `image` which should be the image to caption and returns a text that contains the description in English.
[...]

Task: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French."

I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.

Answer:
```py
translated_question = translator(question=question, src_lang="French", tgt_lang="English")
print(f"The translated question is {translated_question}.")
answer = image_qa(image=image, question=translated_question)
print(f"The answer is {answer}")
```

Task: "Identify the oldest person in the `document` and create an image showcasing the result as a banner."

I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.

Answer:
```py
answer = document_qa(document, question="What is the oldest person?")
print(f"The answer is {answer}.")
image = image_generator("A banner showing " + answer)
```

[...]

Task: "Draw me a picture of rivers and lakes"

I will use the following
````

The introduction (the text before *"Tools:"*) explains precisely how the model shall behave and what it should do.
This part most likely does not need to be customized as the agent shall always behave the same way.

The second part (the bullet points below *"Tools"*) is dynamically added upon calling `run` or `chat`. There are
exactly as many bullet points as there are tools in `agent.toolbox` and each bullet point consists of the name
and description of the tool:

```text
- <tool.name>: <tool.description>
```

Let's verify this quickly by loading the `document_qa` tool and printing out its name and description.

```py
from transformers import load_tool

document_qa = load_tool("document-question-answering")
print(f"- {document_qa.name}: {document_qa.description}")
```

which gives:

```text
- document_qa: This is a tool that answers a question about a document (pdf). It takes an input named `document` which should be the document containing the information, as well as a `question` that is the question about the document. It returns a text that contains the answer to the question.
```

We can see that the tool name is short and precise. The description includes two parts, the first explaining
what the tool does and the second stating what input arguments and return values are expected.

A good tool name and tool description are very important for the agent to correctly use it. Note that the only
information the agent has about the tool is its name and description, so one should make sure that both
are precisely written and match the style of the existing tools in the toolbox. In particular, make sure the description
mentions all the arguments expected by name in code-style, along with the expected type and a description of what they
are.

<Tip>

Check the naming and description of the curated Transformers tools to better understand what name and
description a tool is expected to have. You can see all tools with the [`Agent.toolbox`] property.

</Tip>

The third part includes a set of curated examples that show the agent exactly what code it should produce
for what kind of user request. The large language models empowering the agent are extremely good at
recognizing patterns in a prompt and repeating the pattern with new data. Therefore, it is very important
that the examples are written in a way that maximizes the likelihood of the agent generating correct,
executable code in practice.

Let's have a look at one example:

````text
Task: "Identify the oldest person in the `document` and create an image showcasing the result as a banner."

I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.

Answer:
```py
answer = document_qa(document, question="What is the oldest person?")
print(f"The answer is {answer}.")
image = image_generator("A banner showing " + answer)
```
````

The pattern the model is prompted to repeat has three parts: the task statement, the agent's explanation of
what it intends to do, and finally the generated code. Every example that is part of the prompt has this exact
pattern, thus making sure that the agent will reproduce exactly the same pattern when generating new tokens.

The prompt examples are curated by the Transformers team and rigorously evaluated on a set of
[problem statements](https://github.com/huggingface/transformers/blob/main/src/transformers/tools/evaluate_agent.py)
to ensure that the agent's prompt is as good as possible to solve real use cases of the agent.

The final part of the prompt corresponds to:

```text
Task: "Draw me a picture of rivers and lakes"

I will use the following
```

This is a final and unfinished example that the agent is tasked to complete. The unfinished example
is dynamically created based on the actual user input. For the above example, the user ran:

```py
agent.run("Draw me a picture of rivers and lakes")
```

The user input - *a.k.a.* the task - *"Draw me a picture of rivers and lakes"* is cast into the
prompt template: "Task: <task> \n\n I will use the following". This sentence makes up the final lines of the
prompt the agent is conditioned on, therefore strongly influencing the agent to finish the example
exactly in the same way it was previously done in the examples.

Without going into too much detail, the chat template has the same prompt structure, with the
examples having a slightly different style, *e.g.*:

````text
[...]

=====

Human: Answer the question in the variable `question` about the image stored in the variable `image`.

Assistant: I will use the tool `image_qa` to answer the question on the input image.

```py
answer = image_qa(text=question, image=image)
print(f"The answer is {answer}")
```

Human: I tried this code, it worked but didn't give me a good result. The question is in French

Assistant: In this case, the question needs to be translated first. I will use the tool `translator` to do this.

```py
translated_question = translator(question=question, src_lang="French", tgt_lang="English")
print(f"The translated question is {translated_question}.")
answer = image_qa(text=translated_question, image=image)
print(f"The answer is {answer}")
```

=====

[...]
````

Contrary to the examples of the `run` prompt, each `chat` prompt example has one or more exchanges between the
*Human* and the *Assistant*. Every exchange is structured similarly to the example of the `run` prompt.
The user's input is appended behind *Human:* and the agent is prompted to first generate what needs to be done
before generating code. An exchange can build on previous exchanges, allowing the user to refer
back to them, as the user's input of "I tried **this** code" above does by referring to the agent's
previously generated code.

Upon running `.chat`, the user's input or *task* is cast into an unfinished example of the form:

```text
Human: <user-input>\n\nAssistant:
```

which the agent completes. Contrary to the `run` command, the `chat` command then appends the completed example
to the prompt, thus giving the agent more context for the next `chat` turn.
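
For illustration, a minimal sketch of a two-turn `chat` session (the endpoint and the tasks mirror the examples from [Transformers Agents](transformers_agents)):

```py
from transformers import HfAgent

agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")

# First turn: the task is cast into the "Human: <user-input>\n\nAssistant:" template
agent.chat("Draw me a picture of rivers and lakes")

# Second turn: the completed first exchange stays in the prompt,
# so the agent can resolve "the picture" from context
agent.chat("Transform the picture so that there is a rock in it")
```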

Great, now that we know how the prompt is structured, let's see how we can customize it!

### Writing good user inputs

While large language models are getting better and better at understanding users' intentions, it helps
enormously to be as precise as possible to help the agent pick the correct task. What does it mean to be
as precise as possible?

The agent sees a list of tool names and their descriptions in its prompt. The more tools are added, the
more difficult it becomes for the agent to choose the correct tool, and it is even more difficult to choose
the correct sequence of tools to run. Let's look at a common failure case; here we will only return
the code to analyze it.

```py
from transformers import HfAgent

agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")

agent.run("Show me a tree", return_code=True)
```

gives:

```text
==Explanation from the agent==
I will use the following tool: `image_segmenter` to create a segmentation mask for the image.


==Code generated by the agent==
mask = image_segmenter(image, prompt="tree")
```

which is probably not what we wanted. Instead, it is more likely that we want an image of a tree to be generated.
To steer the agent more towards using a specific tool, it can therefore be very helpful to use important keywords that
are present in the tool's name and description. Let's have a look.

```py
agent.toolbox["image_generator"].description
```

```text
'This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which contains the image description and outputs an image.'
```

The name and description make use of the keywords "image", "prompt", "create" and "generate". Using these words will most likely work better here. Let's refine our prompt a bit.

```py
agent.run("Create an image of a tree", return_code=True)
```

gives:

```text
==Explanation from the agent==
I will use the following tool `image_generator` to generate an image of a tree.


==Code generated by the agent==
image = image_generator(prompt="tree")
```

Much better! That looks more like what we want. In short, when you notice that the agent struggles to
correctly map your task to the correct tools, try looking up the most pertinent keywords of the tool's name
and description and try refining your task request with them.

### Customizing the tool descriptions
### Customizing the tool descriptions

As we've seen before, the agent has access to each of the tools' names and descriptions. The base tools
should have very precise names and descriptions; however, you might find it helpful to change
the description or name of a tool for your specific use case. This might become especially important
when you've added multiple tools that are very similar or if you want to use your agent only for a certain
domain, *e.g.* image generation and transformations.

A common problem is that the agent confuses image generation with image transformation/modification when
it is used a lot for image generation tasks, *e.g.*

```py
agent.run("Make an image of a house and a car", return_code=True)
```

returns

```text
==Explanation from the agent==
I will use the following tools `image_generator` to generate an image of a house and `image_transformer` to transform the image of a car into the image of a house.

==Code generated by the agent==
house_image = image_generator(prompt="A house")
car_image = image_generator(prompt="A car")
house_car_image = image_transformer(image=car_image, prompt="A house")
```

which is probably not exactly what we want here. It seems like the agent has a difficult time
understanding the difference between `image_generator` and `image_transformer` and often uses the two together.

We can help the agent here by changing the tool name and description of `image_transformer`. Let's instead call it `modifier`
to disassociate it a bit from "image" and "prompt":

```py
agent.toolbox["modifier"] = agent.toolbox.pop("image_transformer")
agent.toolbox["modifier"].description = agent.toolbox["modifier"].description.replace(
    "transforms an image according to a prompt", "modifies an image"
)
```

Now "modify" is a strong cue to use the new `modifier` tool, which should help with the above prompt. Let's run it again.

```py
agent.run("Make an image of a house and a car", return_code=True)
```

Now we're getting:

```text
==Explanation from the agent==
I will use the following tools: `image_generator` to generate an image of a house, then `image_generator` to generate an image of a car.


==Code generated by the agent==
house_image = image_generator(prompt="A house")
car_image = image_generator(prompt="A car")
```

which is definitely closer to what we had in mind! However, we want to have both the house and car in the same image. Steering the task more toward single image generation should help:

```py
agent.run("Create image: 'A house and car'", return_code=True)
```

```text
==Explanation from the agent==
I will use the following tool: `image_generator` to generate an image.


==Code generated by the agent==
image = image_generator(prompt="A house and car")
```

<Tip warning={true}>

Agents are still brittle for many use cases, especially when it comes to
slightly more complex use cases like generating an image of multiple objects.
Both the agent itself and the underlying prompt will be further improved in the coming
months, making sure that agents become more robust to a variety of user inputs.

</Tip>

### Customizing the whole prompt
To give the user maximum flexibility, the whole prompt template explained [above](#structure-of-the-prompt)
can be overwritten by the user. In this case make sure that your custom prompt includes an introduction section,
a tool section, an example section, and an unfinished example section. If you want to overwrite the `run` prompt template,
you can do so as follows:

```py
template = """ [...] """

agent = HfAgent(your_endpoint, run_prompt_template=template)
```

<Tip warning={true}>

Please make sure to have the `<<all_tools>>` string and the `<<prompt>>` string defined somewhere in the `template` so that the agent
is aware of the tools it has available and can correctly insert the user's prompt.

</Tip>
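
For concreteness, here is a skeleton of what a custom `run` template could look like. The surrounding wording is illustrative; only the `<<all_tools>>` and `<<prompt>>` placeholders are required:

````text
I will ask you to perform a task, your job is to come up with a series of simple commands in Python that will perform the task.
[...]

Tools:
<<all_tools>>

[... your curated examples, each following the task / explanation / code pattern ...]

Task: "<<prompt>>"

I will use the following
````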
Similarly, one can overwrite the `chat` prompt template. Note that the `chat` mode always uses the following format for the exchanges:

```text
Human: <<task>>

Assistant:
```

Therefore it is important that the examples of the custom `chat` prompt template also make use of this format.
You can overwrite the `chat` template at instantiation as follows:

```py
template = """ [...] """

agent = HfAgent(url_endpoint=your_endpoint, chat_prompt_template=template)
```

<Tip warning={true}>

Please make sure to have the `<<all_tools>>` string defined somewhere in the `template` so that the agent
is aware of the tools it has available.

</Tip>

In both cases, you can pass a repo ID instead of the prompt template if you would like to use a template hosted by someone in the community. The default prompts live in [this repo](https://huggingface.co/datasets/huggingface-tools/default-prompts) as an example.

To upload your custom prompt to a repo on the Hub and share it with the community, just make sure (see the upload sketch after this list):
- to use a dataset repository
- to put the prompt template for the `run` command in a file named `run_prompt_template.txt`
- to put the prompt template for the `chat` command in a file named `chat_prompt_template.txt`
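
As a minimal sketch, the upload could look as follows with `huggingface_hub` (the repository name `my-custom-prompts` and the username are placeholders, and you are assumed to be logged in, *e.g.* via `huggingface-cli login`):

```py
from huggingface_hub import HfApi

api = HfApi()

# Prompt templates for agents live in a *dataset* repository
api.create_repo("my-custom-prompts", repo_type="dataset", exist_ok=True)

# Upload the template files under the expected file names
for filename in ["run_prompt_template.txt", "chat_prompt_template.txt"]:
    api.upload_file(
        path_or_fileobj=filename,
        path_in_repo=filename,
        repo_id="your-username/my-custom-prompts",  # placeholder repo ID
        repo_type="dataset",
    )
```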
## Using custom tools

In this section, we'll be leveraging two existing custom tools that are specific to image generation:

- We replace [huggingface-tools/image-transformation](https://huggingface.co/spaces/huggingface-tools/image-transformation)
  with [diffusers/controlnet-canny-tool](https://huggingface.co/spaces/diffusers/controlnet-canny-tool)
  to allow for more image modifications.
- We add a new tool for image upscaling to the default toolbox:
  [diffusers/latent-upscaler-tool](https://huggingface.co/spaces/diffusers/latent-upscaler-tool).

We'll start by loading the custom tools with the convenient [`load_tool`] function:

```py
from transformers import load_tool

controlnet_transformer = load_tool("diffusers/controlnet-canny-tool")
upscaler = load_tool("diffusers/latent-upscaler-tool")
```

Upon adding custom tools to an agent, the tools' descriptions and names are automatically
included in the agent's prompt. Thus, it is imperative that custom tools have
a well-written description and name in order for the agent to understand how to use them.
Let's take a look at the description and name of `controlnet_transformer`:

```py
print(f"Description: '{controlnet_transformer.description}'")
print(f"Name: '{controlnet_transformer.name}'")
```

gives

```text
Description: 'This is a tool that transforms an image with ControlNet according to a prompt.
It takes two inputs: `image`, which should be the image to transform, and `prompt`, which should be the prompt to use to change it. It returns the modified image.'
Name: 'image_transformer'
```

The name and description are accurate and fit the style of the [curated set of tools](./transformers_agents#a-curated-set-of-tools).
Next, let's instantiate an agent with `controlnet_transformer` and `upscaler`:

```py
tools = [controlnet_transformer, upscaler]
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=tools)
```

This command should give you the following info:

```text
image_transformer has been replaced by <transformers_modules.diffusers.controlnet-canny-tool.bd76182c7777eba9612fc03c08718a60c0aa6312.image_transformation.ControlNetTransformationTool object at 0x7f1d3bfa3a00> as provided in `additional_tools`
```

The set of curated tools already has an `image_transformer` tool which is hereby replaced with our custom tool.

<Tip>

Overwriting existing tools can be beneficial if we want to use a custom tool for exactly the same task as an existing tool,
because the agent is well-versed in using the specific task. Beware that the custom tool should follow the exact same API
as the overwritten tool in this case, or you should adapt the prompt template to make sure all examples using that
tool are updated.

</Tip>

The upscaler tool was given the name `image_upscaler`, which is not yet present in the default toolbox and is therefore simply added to the list of tools.
You can always have a look at the toolbox that is currently available to the agent via the `agent.toolbox` attribute:

```py
print("\n".join([f"- {a}" for a in agent.toolbox.keys()]))
```

```text
- document_qa
- image_captioner
- image_qa
- image_segmenter
- transcriber
- summarizer
- text_classifier
- text_qa
- text_reader
- translator
- image_transformer
- text_downloader
- image_generator
- video_generator
- image_upscaler
```

Note how `image_upscaler` is now part of the agent's toolbox.

Let's now try out the new tools! We will re-use the image we generated in [Transformers Agents Quickstart](./transformers_agents#single-execution-run).

```py
from diffusers.utils import load_image

image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png"
)
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200>

Let's transform the image into a beautiful winter landscape:

```py
image = agent.run("Transform the image: 'A frozen lake and snowy forest'", image=image)
```

```text
==Explanation from the agent==
I will use the following tool: `image_transformer` to transform the image.


==Code generated by the agent==
image = image_transformer(image, prompt="A frozen lake and snowy forest")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_winter.png" width=200>

The new image processing tool is based on ControlNet, which can make very strong modifications to the image.
By default the image processing tool returns an image of size 512x512 pixels. Let's see if we can upscale it.

```py
image = agent.run("Upscale the image", image)
```

```text
==Explanation from the agent==
I will use the following tool: `image_upscaler` to upscale the image.


==Code generated by the agent==
upscaled_image = image_upscaler(image)
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_winter_upscale.png" width=400>

The agent automatically mapped our prompt "Upscale the image" to the just-added upscaler tool, purely based on the
upscaler tool's name and description, and was able to run it correctly.

Next, let's have a look at how you can create a new custom tool.

### Adding new tools

In this section, we show how to create a new tool that can be added to the agent.

#### Creating a new tool

We'll first start by creating a tool. We'll add the not-so-useful yet fun task of fetching the model on the Hugging Face
Hub with the most downloads for a given task.

We can do that with the following code:

```python
from huggingface_hub import list_models

task = "text-classification"

model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(model.id)
```

For the task `text-classification`, this returns `'facebook/bart-large-mnli'`; for `translation` it returns `'t5-base'`.

How do we convert this to a tool that the agent can leverage? All tools depend on the superclass `Tool` that holds the
main attributes necessary. We'll create a class that inherits from it:

```python
from transformers import Tool


class HFModelDownloadsTool(Tool):
    pass
```

This class has a few needs:
- An attribute `name`, which corresponds to the name of the tool itself. To be in tune with other tools which have a
  performative name, we'll name it `model_download_counter`.
- An attribute `description`, which will be used to populate the prompt of the agent.
- `inputs` and `outputs` attributes. Defining these will help the Python interpreter make educated choices about types,
  and will allow for a Gradio demo to be spawned when we push our tool to the Hub. They're both a list of expected
  values, which can be `text`, `image`, or `audio`.
- A `__call__` method which contains the inference code. This is the code we've played with above!

Here's what our class looks like now:

```python
from transformers import Tool
from huggingface_hub import list_models


class HFModelDownloadsTool(Tool):
    name = "model_download_counter"
    description = (
        "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. "
        "It takes the name of the category (such as text-classification, depth-estimation, etc), and "
        "returns the name of the checkpoint."
    )

    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, task: str):
        model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
        return model.id
```

We now have our tool handy. Save it in a file and import it from your main script. Let's name this file
`model_downloads.py`, so the resulting import code looks like this:

```python
from model_downloads import HFModelDownloadsTool

tool = HFModelDownloadsTool()
```

In order to let others benefit from it and for simpler initialization, we recommend pushing it to the Hub under your
namespace. To do so, just call `push_to_hub` on the `tool` variable:

```python
tool.push_to_hub("hf-model-downloads")
```

You now have your code on the Hub! Let's take a look at the final step, which is to have the agent use it.

#### Having the agent use the tool

We now have our tool that lives on the Hub which can be instantiated as such (change the user name for your tool):

```python
from transformers import load_tool

tool = load_tool("lysandre/hf-model-downloads")
```

In order to use it in the agent, simply pass it in the `additional_tools` parameter of the agent initialization method:

```python
|
||||||
|
from transformers import HfAgent
|
||||||
|
|
||||||
|
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[tool])
|
||||||
|
|
||||||
|
agent.run(
|
||||||
|
"Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
which outputs the following:

```text
==Code generated by the agent==
model = model_download_counter(task="text-to-video")
print(f"The model with the most downloads is {model}.")
audio_model = text_reader(model)


==Result==
The model with the most downloads is damo-vilab/text-to-video-ms-1.7b.
```

and generates the following audio.

| **Audio**                                                                                                                                              |
|--------------------------------------------------------------------------------------------------------------------------------------------------------|
| <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/></audio> |

<Tip>

Depending on the LLM, some are quite brittle and require very exact prompts in order to work well. A well-defined
name and description for the tool are paramount to having it leveraged by the agent.

</Tip>

### Replacing existing tools

Replacing existing tools can be done simply by assigning a new item to the agent's toolbox. Here's how one would do so:

```python
from transformers import HfAgent, load_tool

agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
agent.toolbox["image-transformation"] = load_tool("diffusers/controlnet-canny-tool")
```
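
The toolbox behaves like a dictionary mapping tool names to tools, which is what the assignment above relies on. A minimal sketch of listing the currently registered slots before overwriting one, assuming that dict-like `toolbox` attribute:

```python
from transformers import HfAgent

agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")

# the toolbox maps tool names to tools, so its keys are the slots that can be replaced
print(sorted(agent.toolbox.keys()))
```

This makes it easy to check the exact name of the slot (e.g. `"image-transformation"`) before replacing it.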

<Tip>

Beware when replacing tools with others! This will also adjust the agent's prompt. This can be good if you have a better
prompt suited for the task, but it can also result in your tool being selected far more often than others, or in other
tools being selected instead of the one you have defined.

</Tip>

## Leveraging gradio-tools

[gradio-tools](https://github.com/freddyaboulton/gradio-tools) is a powerful library that allows using Hugging
Face Spaces as tools. It supports many existing Spaces, as well as custom Spaces designed with it.

We offer support for `gradio_tools` through the `Tool.from_gradio` method. For example, we want to take
advantage of the `StableDiffusionPromptGeneratorTool` tool offered in the `gradio-tools` toolkit so as to
improve our prompts and generate better images.

We first import the tool from `gradio_tools` and instantiate it:

```python
from gradio_tools import StableDiffusionPromptGeneratorTool

gradio_tool = StableDiffusionPromptGeneratorTool()
```

We pass that instance to the `Tool.from_gradio` method:

```python
from transformers import Tool

tool = Tool.from_gradio(gradio_tool)
```

Now we can manage it exactly as we would a usual custom tool. We leverage it to improve our prompt
`a rabbit wearing a space suit`:

```python
from transformers import HfAgent

agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[tool])

agent.run("Generate an image of the `prompt` after improving it.", prompt="A rabbit wearing a space suit")
```

The model adequately leverages the tool:

```text
==Explanation from the agent==
I will use the following tools: `StableDiffusionPromptGenerator` to improve the prompt, then `image_generator` to generate an image according to the improved prompt.


==Code generated by the agent==
improved_prompt = StableDiffusionPromptGenerator(prompt)
print(f"The improved prompt is {improved_prompt}.")
image = image_generator(improved_prompt)
```

Before finally generating the image:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png">

<Tip warning={true}>

gradio-tools requires *textual* inputs and outputs, even when working with different modalities. This implementation
works with image and audio objects. The two are currently incompatible, but will rapidly become compatible as we
work to improve the support.

</Tip>

## Future compatibility with Langchain

We love Langchain and think it has a very compelling suite of tools. In order to handle these tools,
Langchain requires *textual* inputs and outputs, even when working with different modalities.
This is often the serialized version (i.e., saved to disk) of the objects.

This difference means that multi-modality isn't handled between transformers-agents and langchain.
We aim for this limitation to be resolved in future versions, and welcome any help from avid langchain
users to achieve this compatibility.

We would love to have better support. If you would like to help, please
[open an issue](https://github.com/huggingface/transformers/issues/new) and share what you have in mind.

<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Text generation strategies

Text generation is essential to many NLP tasks, such as open-ended text generation, summarization, translation, and
more. It also plays a role in a variety of mixed-modality applications that have text as an output, like speech-to-text
and vision-to-text. Some of the models that can generate text include
GPT2, XLNet, OpenAI GPT, CTRL, TransformerXL, XLM, Bart, T5, GIT, and Whisper.

Check out a few examples that use the [`~transformers.generation_utils.GenerationMixin.generate`] method to produce
text outputs for different tasks:
* [Text summarization](./tasks/summarization#inference)
* [Image captioning](./model_doc/git#transformers.GitForCausalLM.forward.example)
* [Audio transcription](./model_doc/whisper#transformers.WhisperForConditionalGeneration.forward.example)

Note that the inputs to the generate method depend on the model's modality. They are returned by the model's preprocessor
class, such as AutoTokenizer or AutoProcessor. If a model's preprocessor creates more than one kind of input, pass all
the inputs to generate(). You can learn more about the individual model's preprocessor in the corresponding model's documentation.

The process of selecting output tokens to generate text is known as decoding, and you can customize the decoding strategy
that the `generate()` method will use. Modifying a decoding strategy does not change the values of any trainable parameters.
However, it can have a noticeable impact on the quality of the generated output. It can help reduce repetition in the text
and make it more coherent.

This guide describes:
* default generation configuration
* common decoding strategies and their main parameters
* saving and sharing custom generation configurations with your fine-tuned model on 🤗 Hub

## Default text generation configuration

A decoding strategy for a model is defined in its generation configuration. When using pre-trained models for inference
within a [`pipeline`], the models call the `PreTrainedModel.generate()` method that applies a default generation
configuration under the hood. The default configuration is also used when no custom configuration has been saved with
the model.

When you load a model explicitly, you can inspect the generation configuration that comes with it through
`model.generation_config`:

```python
>>> from transformers import AutoModelForCausalLM

>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
>>> model.generation_config
GenerationConfig {
  "_from_model_config": true,
  "bos_token_id": 50256,
  "eos_token_id": 50256,
  "transformers_version": "4.26.0.dev0"
}
```

Printing out the `model.generation_config` reveals only the values that are different from the default generation
configuration, and does not list any of the default values.

The default generation configuration limits the size of the output combined with the input prompt to a maximum of 20
tokens to avoid running into resource limitations. The default decoding strategy is greedy search, which is the simplest
decoding strategy that picks the token with the highest probability as the next token. For many tasks
and small output sizes this works well. However, when used to generate longer outputs, greedy search can start
producing highly repetitive results.

## Customize text generation

You can override any `generation_config` by passing the parameters and their values directly to the [`generate`] method:

```python
>>> my_model.generate(**inputs, num_beams=4, do_sample=True)
```

Even if the default decoding strategy mostly works for your task, you can still tweak a few things. Some of the
commonly adjusted parameters include (a combined sketch follows the list):

- `max_new_tokens`: the maximum number of tokens to generate. In other words, the size of the output sequence, not
including the tokens in the prompt.
- `num_beams`: by specifying a number of beams higher than 1, you are effectively switching from greedy search to
beam search. This strategy evaluates several hypotheses at each time step and eventually chooses the hypothesis that
has the overall highest probability for the entire sequence. This has the advantage of identifying high-probability
sequences that start with lower probability initial tokens and would've been ignored by the greedy search.
- `do_sample`: if set to `True`, this parameter enables decoding strategies such as multinomial sampling, beam-search
multinomial sampling, Top-K sampling and Top-p sampling. All these strategies select the next token from the probability
distribution over the entire vocabulary with various strategy-specific adjustments.
- `num_return_sequences`: the number of sequence candidates to return for each input. This option is only available for
decoding strategies that support multiple sequence candidates, e.g. variations of beam search and sampling. Decoding
strategies like greedy search and contrastive search return a single output sequence.
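
As a minimal sketch of how these parameters combine (using `distilgpt2` purely as a small example checkpoint), the following requests two sampled beam-search candidates of at most 30 new tokens each:

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")

>>> inputs = tokenizer("The secret to baking a good cake is", return_tensors="pt")

>>> # beam search (num_beams > 1) with sampling, returning two candidates per input
>>> outputs = model.generate(**inputs, max_new_tokens=30, num_beams=4, do_sample=True, num_return_sequences=2)
>>> len(tokenizer.batch_decode(outputs, skip_special_tokens=True))
2
```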

## Save a custom decoding strategy with your model

If you would like to share your fine-tuned model with a specific generation configuration, you can:
* Create a [`GenerationConfig`] class instance
* Specify the decoding strategy parameters
* Save your generation configuration with [`GenerationConfig.save_pretrained`], making sure to leave its `config_file_name` argument empty
* Set `push_to_hub` to `True` to upload your config to the model's repo

```python
>>> from transformers import AutoModelForCausalLM, GenerationConfig

>>> model = AutoModelForCausalLM.from_pretrained("my_account/my_model")
>>> generation_config = GenerationConfig(
...     max_new_tokens=50, do_sample=True, top_k=50, eos_token_id=model.config.eos_token_id
... )
>>> generation_config.save_pretrained("my_account/my_model", push_to_hub=True)
```

You can also store several generation configurations in a single directory, making use of the `config_file_name`
argument in [`GenerationConfig.save_pretrained`]. You can later instantiate them with [`GenerationConfig.from_pretrained`]. This is useful if you want to
store several generation configurations for a single model (e.g. one for creative text generation with sampling, and
one for summarization with beam search). You must have the right Hub permissions to add configuration files to a model.

```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig

>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

>>> translation_generation_config = GenerationConfig(
...     num_beams=4,
...     early_stopping=True,
...     decoder_start_token_id=0,
...     eos_token_id=model.config.eos_token_id,
...     pad_token=model.config.pad_token_id,
... )

>>> translation_generation_config.save_pretrained("t5-small", "translation_generation_config.json", push_to_hub=True)

>>> # You could then use the named generation config file to parameterize generation
>>> generation_config = GenerationConfig.from_pretrained("t5-small", "translation_generation_config.json")
>>> inputs = tokenizer("translate English to French: Configuration files are easy to use!", return_tensors="pt")
>>> outputs = model.generate(**inputs, generation_config=generation_config)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['Les fichiers de configuration sont faciles à utiliser !']
```

## Streaming

The `generate()` method supports streaming through its `streamer` input. The `streamer` input is compatible with any instance
of a class that has the following methods: `put()` and `end()`. Internally, `put()` is used to push new tokens and
`end()` is used to flag the end of text generation.

<Tip warning={true}>

The API for the streamer classes is still under development and may change in the future.

</Tip>

In practice, you can craft your own streaming class for all sorts of purposes! We also have basic streaming classes
ready for you to use. For example, you can use the [`TextStreamer`] class to stream the output of `generate()` into
your screen, one word at a time:

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

>>> tok = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
>>> streamer = TextStreamer(tok)

>>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
>>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
```
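
Because the streamer interface is just `put()` and `end()`, writing your own streamer is a matter of implementing those two methods. Below is a minimal sketch of a hypothetical custom streamer (the class name and behavior are illustrative only): `put()` receives tensors of token ids, starting with the prompt, and `end()` fires once generation finishes.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> class CollectorStreamer:
...     """A hypothetical streamer that stores the streamed token ids instead of printing text."""
...
...     def __init__(self):
...         self.token_ids = []
...
...     def put(self, value):
...         # `value` is a tensor of token ids: the prompt first, then the new tokens
...         self.token_ids.append(value)
...
...     def end(self):
...         print("generation finished")

>>> tok = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")

>>> _ = model.generate(**inputs, streamer=CollectorStreamer(), max_new_tokens=5)
generation finished
```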

## Decoding strategies

Certain combinations of the `generate()` parameters, and ultimately `generation_config`, can be used to enable specific
decoding strategies. If you are new to this concept, we recommend reading [this blog post that illustrates how common decoding strategies work](https://huggingface.co/blog/how-to-generate).

Here, we'll show some of the parameters that control the decoding strategies and illustrate how you can use them.

### Greedy Search

[`generate`] uses greedy search decoding by default so you don't have to pass any parameters to enable it. This means that the parameter `num_beams` is set to 1 and `do_sample=False`.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "I look forward to"
>>> checkpoint = "distilgpt2"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> outputs = model.generate(**inputs)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n']
```

### Contrastive search

The contrastive search decoding strategy was proposed in the 2022 paper [A Contrastive Framework for Neural Text Generation](https://arxiv.org/abs/2202.06417).
It demonstrates superior results for generating non-repetitive yet coherent long outputs. To learn how contrastive search
works, check out [this blog post](https://huggingface.co/blog/introducing-csearch).
The two main parameters that enable and control the behavior of contrastive search are `penalty_alpha` and `top_k`:

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> checkpoint = "gpt2-large"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> prompt = "Hugging Face Company is"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> outputs = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=100)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Hugging Face Company is a family owned and operated business. \
We pride ourselves on being the best in the business and our customer service is second to none.\
\n\nIf you have any questions about our products or services, feel free to contact us at any time.\
We look forward to hearing from you!']
```

### Multinomial sampling

As opposed to greedy search that always chooses a token with the highest probability as the
next token, multinomial sampling (also called ancestral sampling) randomly selects the next token based on the probability distribution over the entire
vocabulary given by the model. Every token with a non-zero probability has a chance of being selected, thus reducing the
risk of repetition.

To enable multinomial sampling, set `do_sample=True` and `num_beams=1`.

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> checkpoint = "gpt2-large"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> prompt = "Today was an amazing day because"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> outputs = model.generate(**inputs, do_sample=True, num_beams=1, max_new_tokens=100)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Today was an amazing day because we are now in the final stages of our trip to New York City which was very tough. \
It is a difficult schedule and a challenging part of the year but still worth it. I have been taking things easier and \
I feel stronger and more motivated to be out there on their tour. Hopefully, that experience is going to help them with \
their upcoming events which are currently scheduled in Australia.\n\nWe love that they are here. They want to make a \
name for themselves and become famous for what they']
```

### Beam-search decoding

Unlike greedy search, beam-search decoding keeps several hypotheses at each time step and eventually chooses
the hypothesis that has the overall highest probability for the entire sequence. This has the advantage of identifying high-probability
sequences that start with lower probability initial tokens and would've been ignored by the greedy search.

To enable this decoding strategy, specify a `num_beams` (aka the number of hypotheses to keep track of) greater than 1.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "It is astonishing how one can"
>>> checkpoint = "gpt2-medium"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, max_new_tokens=50)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['It is astonishing how one can have such a profound impact on the lives of so many people in such a short period of \
time."\n\nHe added: "I am very proud of the work I have been able to do in the last few years.\n\n"I have']
```

### Beam-search multinomial sampling

As the name implies, this decoding strategy combines beam search with multinomial sampling. You need to specify
a `num_beams` greater than 1, and set `do_sample=True` to use this decoding strategy.

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> prompt = "translate English to German: The house is wonderful."
>>> checkpoint = "t5-small"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, do_sample=True)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'Das Haus ist wunderbar.'
```

### Diverse beam search decoding

The diverse beam search decoding strategy is an extension of the beam search strategy that allows for generating a more diverse
set of beam sequences to choose from. To learn how it works, refer to [Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf).
This approach has three main parameters: `num_beams`, `num_beam_groups`, and `diversity_penalty`.
The diversity penalty ensures the outputs are distinct across groups, and beam search is used within each group.

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> checkpoint = "google/pegasus-xsum"
>>> prompt = "The Permaculture Design Principles are a set of universal design principles \
>>> that can be applied to any location, climate and culture, and they allow us to design \
>>> the most efficient and sustainable human habitation and food production systems. \
>>> Permaculture is a design system that encompasses a wide variety of disciplines, such \
>>> as ecology, landscape design, environmental science and energy conservation, and the \
>>> Permaculture design principles are drawn from these various disciplines. Each individual \
>>> design principle itself embodies a complete conceptual framework based on sound \
>>> scientific principles. When we bring all these separate principles together, we can \
>>> create a design system that both looks at whole systems, the parts that these systems \
>>> consist of, and how those parts interact with each other to create a complex, dynamic, \
>>> living system. Each design principle serves as a tool that allows us to integrate all \
>>> the separate parts of a design, referred to as elements, into a functional, synergistic, \
>>> whole system, where the elements harmoniously interact and work together in the most \
>>> efficient way possible."

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, num_beam_groups=5, max_new_tokens=30, diversity_penalty=1.0)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'The aim of this project is to create a new type of living system, one that is more sustainable and efficient than the current one.'
```

This guide illustrates the main parameters that enable various decoding strategies. More advanced parameters exist for the
[`generate`] method, which give you even further control over its behavior.
For the complete list of the available parameters, refer to the [API documentation](./main_classes/text_generation.md).

### Assisted Decoding

Assisted decoding is a modification of the decoding strategies above that uses an assistant model with the same
tokenizer (ideally a much smaller model) to greedily generate a few candidate tokens. The main model then validates
the candidate tokens in a single forward pass, which speeds up the decoding process. Currently, only greedy search
and sampling are supported with assisted decoding, and it doesn't support batched inputs. To learn more about assisted
decoding, check [this blog post](https://huggingface.co/blog/assisted-generation).

To enable assisted decoding, set the `assistant_model` argument with a model.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
```

When using assisted decoding with sampling methods, you can use the `temperature` argument to control the randomness
just like in multinomial sampling. However, in assisted decoding, reducing the temperature will help improve latency.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Alice and Bob are sitting on the sofa. Alice says, 'I'm going to my room"]
```

<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Glossary

This glossary defines general machine learning and 🤗 Transformers terms to help you better understand the
documentation.

## A

### attention mask

The attention mask is an optional argument used when batching sequences together.

<Youtube id="M6adb1j2jPI"/>

This argument indicates to the model which tokens should be attended to, and which should not.

For example, consider these two sequences:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

>>> sequence_a = "This is a short sequence."
>>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A."

>>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"]
>>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"]
```

The encoded versions have different lengths:

```python
>>> len(encoded_sequence_a), len(encoded_sequence_b)
(8, 19)
```

Therefore, we can't put them together in the same tensor as-is. The first sequence needs to be padded up to the length
of the second one, or the second one needs to be truncated down to the length of the first one.

In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask
it to pad like this:

```python
>>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True)
```

We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:

```python
>>> padded_sequences["input_ids"]
[[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]]
```

This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating the
position of the padded indices so that the model does not attend to them. For the [`BertTokenizer`], `1` indicates a
value that should be attended to, while `0` indicates a padded value. This attention mask is in the dictionary returned
by the tokenizer under the key "attention_mask":

```python
>>> padded_sequences["attention_mask"]
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
```

### autoencoding models

See [encoder models](#encoder-models) and [masked language modeling](#masked-language-modeling-mlm)

### autoregressive models

See [causal language modeling](#causal-language-modeling) and [decoder models](#decoder-models)

## B

### backbone

The backbone is the network (embeddings and layers) that outputs the raw hidden states or features. It is usually connected to a [head](#head) which accepts the features as its input to make a prediction. For example, [`ViTModel`] is a backbone without a specific head on top. Other models can also use [`ViTModel`] as a backbone, such as [DPT](model_doc/dpt).
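
As a minimal sketch, [`ViTModel`] used as a bare backbone only produces features (the random tensor below is a stand-in for real image pixel values):

```python
>>> import torch
>>> from transformers import ViTModel

>>> backbone = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k")

>>> pixel_values = torch.randn(1, 3, 224, 224)  # a dummy image batch
>>> features = backbone(pixel_values).last_hidden_state  # raw hidden states, no task-specific head
>>> features.shape
torch.Size([1, 197, 768])
```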

## C

### causal language modeling

A pretraining task where the model reads the texts in order and has to predict the next word. It's usually done by
reading the whole sentence but using a mask inside the model to hide the future tokens at a certain timestep.

### channel

Color images are made up of some combination of values in three channels - red, green, and blue (RGB) - and grayscale images only have one channel. In 🤗 Transformers, the channel can be the first or last dimension of an image's tensor: [`n_channels`, `height`, `width`] or [`height`, `width`, `n_channels`].
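
A small illustration of the two layouts, using a random tensor in place of a real image:

```python
>>> import torch

>>> image = torch.rand(3, 224, 224)  # channels-first: [n_channels, height, width]
>>> image_channels_last = image.permute(1, 2, 0)  # channels-last: [height, width, n_channels]
>>> image_channels_last.shape
torch.Size([224, 224, 3])
```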

### connectionist temporal classification (CTC)

An algorithm which allows a model to learn without knowing exactly how the input and output are aligned; CTC calculates the distribution of all possible outputs for a given input and chooses the most likely output from it. CTC is commonly used in speech recognition tasks because speech doesn't always cleanly align with the transcript for a variety of reasons such as a speaker's different speech rates.

### convolution

A type of layer in a neural network where the input matrix is multiplied element-wise by a smaller matrix (kernel or filter) and the values are summed up in a new matrix. This is known as a convolutional operation which is repeated over the entire input matrix. Each operation is applied to a different segment of the input matrix. Convolutional neural networks (CNNs) are commonly used in computer vision.
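
As a small sketch with PyTorch, a 3x3 kernel slides over a 32x32 input, producing one summed value per position:

```python
>>> import torch

>>> conv = torch.nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)
>>> image = torch.randn(1, 3, 32, 32)  # [batch_size, n_channels, height, width]
>>> conv(image).shape  # 32 - 3 + 1 = 30 valid kernel positions per spatial dimension
torch.Size([1, 8, 30, 30])
```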

## D

### decoder input IDs

This input is specific to encoder-decoder models, and contains the input IDs that will be fed to the decoder. These
inputs should be used for sequence to sequence tasks, such as translation or summarization, and are usually built in a
way specific to each model.

Most encoder-decoder models (BART, T5) create their `decoder_input_ids` on their own from the `labels`. In such models,
passing the `labels` is the preferred way to handle training.

Please check each model's docs to see how they handle these input IDs for sequence to sequence training.
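
For example, with T5 you can pass only the `labels`; the model builds the `decoder_input_ids` internally from them. A minimal sketch:

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

>>> inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
>>> labels = tokenizer("Das Haus ist wunderbar.", return_tensors="pt").input_ids

>>> # passing labels makes the model create decoder_input_ids internally and return a loss
>>> outputs = model(**inputs, labels=labels)
```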

### decoder models

Also referred to as autoregressive models, decoder models involve a pretraining task (called causal language modeling) where the model reads the texts in order and has to predict the next word. It's usually done by
reading the whole sentence with a mask to hide future tokens at a certain timestep.

<Youtube id="d_ixlCubqQw"/>

### deep learning (DL)

Machine learning algorithms which use neural networks with several layers.

## E

### encoder models

Also known as autoencoding models, encoder models take an input (such as text or images) and transform them into a condensed numerical representation called an embedding. Oftentimes, encoder models are pretrained using techniques like [masked language modeling](#masked-language-modeling-mlm), which masks parts of the input sequence and forces the model to create more meaningful representations.

<Youtube id="H39Z_720T5s"/>

## F

### feature extraction

The process of selecting and transforming raw data into a set of features that are more informative and useful for machine learning algorithms. Some examples of feature extraction include transforming raw text into word embeddings and extracting important features such as edges or shapes from image/video data.

### feed forward chunking

In each residual attention block in transformers the self-attention layer is usually followed by 2 feed forward layers.
The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g., for
`bert-base-uncased`).

For an input of size `[batch_size, sequence_length]`, the memory required to store the intermediate feed forward
embeddings `[batch_size, sequence_length, config.intermediate_size]` can account for a large fraction of the memory
use. The authors of [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) noticed that since the
computation is independent of the `sequence_length` dimension, it is mathematically equivalent to compute the output
embeddings of both feed forward layers `[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n`
individually and concat them afterward to `[batch_size, sequence_length, config.hidden_size]` with `n =
sequence_length`, which trades increased computation time against reduced memory use, but yields a mathematically
**equivalent** result.

For models employing the function [`apply_chunking_to_forward`], the `chunk_size` defines the number of output
embeddings that are computed in parallel and thus defines the trade-off between memory and time complexity. If
`chunk_size` is set to 0, no feed forward chunking is done.
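
A minimal sketch of [`apply_chunking_to_forward`] on a toy feed forward layer (the dimensions are made up for illustration; in recent versions the helper lives in `transformers.pytorch_utils`):

```python
>>> import torch
>>> from transformers.pytorch_utils import apply_chunking_to_forward

>>> dense = torch.nn.Linear(16, 16)

>>> def feed_forward(hidden_states):
...     return dense(hidden_states)

>>> hidden_states = torch.randn(2, 8, 16)  # [batch_size, sequence_length, hidden_size]
>>> # chunk_size=2 processes the sequence dimension (dim 1) two positions at a time
>>> output = apply_chunking_to_forward(feed_forward, 2, 1, hidden_states)
>>> output.shape  # same result as one big forward pass, lower peak memory
torch.Size([2, 8, 16])
```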

### finetuned models

Finetuning is a form of transfer learning which involves taking a pretrained model, freezing its weights, and replacing the output layer with a newly added [model head](#head). The model head is trained on your target dataset.

See the [Fine-tune a pretrained model](https://huggingface.co/docs/transformers/training) tutorial for more details, and learn how to fine-tune models with 🤗 Transformers.

## H

### head

The model head refers to the last layer of a neural network that accepts the raw hidden states and projects them onto a different dimension. There is a different model head for each task. For example:

* [`GPT2ForSequenceClassification`] is a sequence classification head - a linear layer - on top of the base [`GPT2Model`].
* [`ViTForImageClassification`] is an image classification head - a linear layer on top of the final hidden state of the `CLS` token - on top of the base [`ViTModel`].
* [`Wav2Vec2ForCTC`] is a language modeling head with [CTC](#connectionist-temporal-classification-(CTC)) on top of the base [`Wav2Vec2Model`].

## I

### image patch

Vision-based Transformers models split an image into smaller patches which are linearly embedded, and then passed as a sequence to the model. You can find the `patch_size` - or resolution - of the model in its configuration.
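
For example, the `patch_size` of a ViT checkpoint can be read from its configuration:

```python
>>> from transformers import ViTConfig

>>> config = ViTConfig.from_pretrained("google/vit-base-patch16-224")
>>> config.patch_size
16
```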

### inference

Inference is the process of evaluating a model on new data after training is complete. See the [Pipeline for inference](https://huggingface.co/docs/transformers/pipeline_tutorial) tutorial to learn how to perform inference with 🤗 Transformers.

### input IDs

The input ids are often the only required parameters to be passed to the model as input. They are token indices,
numerical representations of tokens building the sequences that will be used as input by the model.

<Youtube id="VFp38yj8h3A"/>

Each tokenizer works differently but the underlying mechanism remains the same. Here's an example using the BERT
tokenizer, which is a [WordPiece](https://arxiv.org/pdf/1609.08144.pdf) tokenizer:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

>>> sequence = "A Titan RTX has 24GB of VRAM"
```

The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary.

```python
>>> tokenized_sequence = tokenizer.tokenize(sequence)
```

The tokens are either words or subwords. Here for instance, "VRAM" wasn't in the model vocabulary, so it's been split
into "V", "RA" and "M". To indicate those tokens are not separate words but parts of the same word, a double-hash prefix
is added for "RA" and "M":

```python
>>> print(tokenized_sequence)
['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M']
```

These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding
the sentence to the tokenizer, which leverages the Rust implementation of [🤗
Tokenizers](https://github.com/huggingface/tokenizers) for peak performance.

```python
>>> inputs = tokenizer(sequence)
```

The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. The
token indices are under the key `input_ids`:

```python
>>> encoded_sequence = inputs["input_ids"]
>>> print(encoded_sequence)
[101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102]
```

Note that the tokenizer automatically adds "special tokens" (if the associated model relies on them) which are special
IDs the model sometimes uses.

If we decode the previous sequence of ids,

```python
>>> decoded_sequence = tokenizer.decode(encoded_sequence)
```

we will see

```python
>>> print(decoded_sequence)
[CLS] A Titan RTX has 24GB of VRAM [SEP]
```

because this is the way a [`BertModel`] is going to expect its inputs.

## L

### labels

The labels are an optional argument which can be passed in order for the model to compute the loss itself. These labels
should be the expected prediction of the model: it will use the standard loss in order to compute the loss between its
predictions and the expected value (the label).

These labels are different according to the model head, for example:

- For sequence classification models, ([`BertForSequenceClassification`]), the model expects a tensor of dimension
`(batch_size)` with each value of the batch corresponding to the expected label of the entire sequence.
- For token classification models, ([`BertForTokenClassification`]), the model expects a tensor of dimension
`(batch_size, seq_length)` with each value corresponding to the expected label of each individual token.
- For masked language modeling, ([`BertForMaskedLM`]), the model expects a tensor of dimension `(batch_size,
seq_length)` with each value corresponding to the expected label of each individual token: the labels being the token
ID for the masked token, and values to be ignored for the rest (usually -100).
- For sequence to sequence tasks, ([`BartForConditionalGeneration`], [`MBartForConditionalGeneration`]), the model
expects a tensor of dimension `(batch_size, tgt_seq_length)` with each value corresponding to the target sequences
associated with each input sequence. During training, both BART and T5 will make the appropriate
`decoder_input_ids` and decoder attention masks internally. They usually do not need to be supplied. This does not
apply to models leveraging the Encoder-Decoder framework.
- For image classification models, ([`ViTForImageClassification`]), the model expects a tensor of dimension
`(batch_size)` with each value of the batch corresponding to the expected label of each individual image.
- For semantic segmentation models, ([`SegformerForSemanticSegmentation`]), the model expects a tensor of dimension
`(batch_size, height, width)` with each value of the batch corresponding to the expected label of each individual pixel.
- For object detection models, ([`DetrForObjectDetection`]), the model expects a list of dictionaries with a
`class_labels` and `boxes` key where each value of the batch corresponds to the expected label and number of bounding boxes of each individual image.
- For automatic speech recognition models, ([`Wav2Vec2ForCTC`]), the model expects a tensor of dimension `(batch_size,
target_length)` with each value corresponding to the expected label of each individual token.

<Tip>

Each model's labels may be different, so be sure to always check the documentation of each model for more information
about their specific labels!

</Tip>

The base models ([`BertModel`]) do not accept labels, as these are the base transformer models, simply outputting
features.
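
As a minimal sketch, here is how labels are passed for sequence classification (the freshly initialized classification head is untrained, so the loss value itself is meaningless here):

```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

>>> inputs = tokenizer("This movie was great!", return_tensors="pt")
>>> labels = torch.tensor([1])  # one expected label per sequence in the batch

>>> loss = model(**inputs, labels=labels).loss  # the model computes the loss itself
```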

### large language models (LLM)

A generic term that refers to transformer language models (GPT-3, BLOOM, OPT) that were trained on a large quantity of data. These models also tend to have a large number of learnable parameters (e.g. 175 billion for GPT-3).

## M

### masked language modeling (MLM)

A pretraining task where the model sees a corrupted version of the texts, usually done by
masking some tokens randomly, and has to predict the original text.

### multimodal

A task that combines texts with another kind of inputs (for instance images).

## N

### Natural language generation (NLG)

All tasks related to generating text (for instance, [Write With Transformers](https://transformer.huggingface.co/), translation).

### Natural language processing (NLP)

A generic way to say "deal with texts".

### Natural language understanding (NLU)

All tasks related to understanding what is in a text (for instance classifying the
whole text, individual words).

## P

### pipeline

A pipeline in 🤗 Transformers is an abstraction referring to a series of steps that are executed in a specific order to preprocess and transform data and return a prediction from a model. Some example stages found in a pipeline might be data preprocessing, feature extraction, and normalization.

For more details, see [Pipelines for inference](https://huggingface.co/docs/transformers/pipeline_tutorial).

### pixel values

A tensor of the numerical representations of an image that is passed to a model. The pixel values have a shape of [`batch_size`, `num_channels`, `height`, `width`], and are generated from an image processor.
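
A small sketch of producing pixel values with an image processor (using a dummy black image for illustration):

```python
>>> import numpy as np
>>> from PIL import Image
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
>>> image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))  # a dummy black image

>>> pixel_values = image_processor(image, return_tensors="pt").pixel_values
>>> pixel_values.shape  # resized and normalized to the model's expected input size
torch.Size([1, 3, 224, 224])
```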

### pooling

An operation that reduces a matrix into a smaller matrix, either by taking the maximum or average of the pooled dimension(s). Pooling layers are commonly found between convolutional layers to downsample the feature representation.

### position IDs

Contrary to RNNs that have the position of each token embedded within them, transformers are unaware of the position of
each token. Therefore, the position IDs (`position_ids`) are used by the model to identify each token's position in the
list of tokens.

They are an optional parameter. If no `position_ids` are passed to the model, the IDs are automatically created as
absolute positional embeddings.

Absolute positional embeddings are selected in the range `[0, config.max_position_embeddings - 1]`. Some models use
other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.
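
For illustration, absolute position IDs for a single sequence are simply the token offsets:

```python
>>> import torch

>>> seq_length = 6
>>> position_ids = torch.arange(seq_length).unsqueeze(0)  # shape [1, seq_length], one row per sequence
>>> position_ids
tensor([[0, 1, 2, 3, 4, 5]])
```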

### preprocessing

The task of preparing raw data into a format that can be easily consumed by machine learning models. For example, text is typically preprocessed by tokenization. To gain a better idea of what preprocessing looks like for other input types, check out the [Preprocess](https://huggingface.co/docs/transformers/preprocessing) tutorial.

### pretrained model

A model that has been pretrained on some data (for instance all of Wikipedia). Pretraining methods involve a
self-supervised objective, which can be reading the text and trying to predict the next word (see [causal language
modeling](#causal-language-modeling)) or masking some words and trying to predict them (see [masked language
modeling](#masked-language-modeling-mlm)).

Speech and vision models have their own pretraining objectives. For example, Wav2Vec2 is a speech model pretrained on a contrastive task which requires the model to identify the "true" speech representation from a set of "false" speech representations. On the other hand, BEiT is a vision model pretrained on a masked image modeling task which masks some of the image patches and requires the model to predict the masked patches (similar to the masked language modeling objective).

## R

### recurrent neural network (RNN)

A type of model that uses a loop over a layer to process texts.

### representation learning

A subfield of machine learning which focuses on learning meaningful representations of raw data. Some examples of representation learning techniques include word embeddings, autoencoders, and Generative Adversarial Networks (GANs).

## S

### sampling rate

A measurement in hertz of the number of samples (the audio signal) taken per second. The sampling rate is a result of discretizing a continuous signal such as speech.

### self-attention

Each element of the input finds out which other elements of the input it should attend to.
|
||||||
|
|
||||||
|
### self-supervised learning

A category of machine learning techniques in which a model creates its own learning objective from unlabeled data. It differs from [unsupervised learning](#unsupervised-learning) and [supervised learning](#supervised-learning) in that the learning process is supervised, but the supervision signal comes from the data itself rather than from the user.

One example of self-supervised learning is [masked language modeling](#masked-language-modeling-mlm), where a model is passed sentences with a proportion of their tokens removed and learns to predict the missing tokens.
### semi-supervised learning

A broad category of machine learning training techniques that leverages a small amount of labeled data together with a larger quantity of unlabeled data to improve the accuracy of a model, unlike [supervised learning](#supervised-learning) and [unsupervised learning](#unsupervised-learning).

An example of a semi-supervised learning approach is "self-training", in which a model is trained on labeled data, and then used to make predictions on the unlabeled data. The portion of the unlabeled data that the model predicts with the most confidence gets added to the labeled dataset and used to retrain the model.
### sequence-to-sequence (seq2seq)

Models that generate a new sequence from an input, like translation models, or summarization models (such as
[Bart](model_doc/bart) or [T5](model_doc/t5)).
### stride

In [convolution](#convolution) or [pooling](#pooling), the stride refers to the distance the kernel is moved over a matrix. A stride of 1 means the kernel is moved one pixel over at a time, and a stride of 2 means the kernel is moved two pixels over at a time.
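For example, a sketch of how the stride changes the output size of a convolution in PyTorch (the sizes are arbitrary):

```python
>>> import torch
>>> from torch import nn

>>> image = torch.randn(1, 3, 32, 32)
>>> conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=2, padding=1)
>>> conv(image).shape  # moving the kernel two pixels at a time halves each spatial dimension
torch.Size([1, 8, 16, 16])
```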
### supervised learning

A form of model training that directly uses labeled data to correct and instruct model performance. Data is fed into the model being trained, and its predictions are compared to the known labels. The model updates its weights based on how incorrect its predictions were, and the process is repeated to optimize model performance.
## T

### token

A part of a sentence, usually a word, but can also be a subword (uncommon words are often split into subwords) or a
punctuation symbol.
### token type IDs

Some models are designed to classify pairs of sentences or to do question answering.

<Youtube id="0u3ioSwev3s"/>

These require two different sequences to be joined in a single "input_ids" entry, which is usually performed with the
help of special tokens, such as the classifier (`[CLS]`) and separator (`[SEP]`) tokens. For example, the BERT model
builds its two sequence input as such:

```python
>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]
```

We can use our tokenizer to automatically generate such an input by passing the two sequences to `tokenizer` as two
arguments (and not a list, like before) like this:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> sequence_a = "HuggingFace is based in NYC"
>>> sequence_b = "Where is HuggingFace based?"

>>> encoded_dict = tokenizer(sequence_a, sequence_b)
>>> decoded = tokenizer.decode(encoded_dict["input_ids"])
```

which will return:

```python
>>> print(decoded)
[CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]
```

This is enough for some models to understand where one sequence ends and where another begins. However, other models,
such as BERT, also deploy token type IDs (also called segment IDs). They are represented as a binary mask identifying
the two types of sequence in the model.

The tokenizer returns this mask as the "token_type_ids" entry:

```python
>>> encoded_dict["token_type_ids"]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```

The first sequence, the "context" used for the question, has all its tokens represented by a `0`, whereas the second
sequence, corresponding to the "question", has all its tokens represented by a `1`.

Some models, like [`XLNetModel`], use an additional token represented by a `2`.
### transfer learning

A technique that involves taking a pretrained model and adapting it to a dataset specific to your task. Instead of training a model from scratch, you can leverage knowledge obtained from an existing model as a starting point. This speeds up the learning process and reduces the amount of training data needed.
### transformer

A deep learning model architecture based on self-attention.
## U

### unsupervised learning

A form of model training in which data provided to the model is not labeled. Unsupervised learning techniques leverage statistical information of the data distribution to find patterns useful for the task at hand.
@ -1,299 +0,0 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Glossary

## General terms
- autoencoding models: see MLM
- autoregressive models: see CLM
- CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the
  next word. It's usually done by reading the whole sentence but using a mask inside the model to hide the future
  tokens at a certain timestep.
- deep learning: machine learning algorithms which use neural networks with several layers.
- MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done
  by masking some tokens randomly, and has to predict the original text.
- multimodal: a task that combines texts with another kind of input (for instance images).
- NLG: natural language generation, all tasks related to generating text (for instance talking with transformers,
  translation).
- NLP: natural language processing, a generic way to say "deal with texts".
- NLU: natural language understanding, all tasks related to understanding what is in a text (for instance classifying
  the whole text, individual words).
- pretrained model: a model that has been pretrained on some data (for instance all of Wikipedia). Pretraining methods
  involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or
  masking some words and trying to predict them (see MLM).
- RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts.
- self-attention: each element of the input finds out which other elements of the input it should attend to.
- seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or
  summarization models (such as [Bart](model_doc/bart) or [T5](model_doc/t5)).
- token: a part of a sentence, usually a word, but can also be a subword (uncommon words are often split into subwords)
  or a punctuation symbol.
- transformer: self-attention based deep learning model architecture.
## Model inputs

Every model is different yet bears similarities with the others. Therefore most models use the same inputs, which are
detailed here alongside usage examples.
### Input IDs

The input IDs are often the only required parameters to be passed to the model as input. *They are token indices,
numerical representations of tokens building the sequences that will be used as input by the model*.

<Youtube id="VFp38yj8h3A"/>

Each tokenizer works differently but the underlying mechanism remains the same. Here's an example using the BERT
tokenizer, which is a [WordPiece](https://arxiv.org/pdf/1609.08144.pdf) tokenizer:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

>>> sequence = "A Titan RTX has 24GB of VRAM"
```

The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary.

```python
>>> tokenized_sequence = tokenizer.tokenize(sequence)
```

The tokens are either words or subwords. Here for instance, "VRAM" wasn't in the model vocabulary, so it's been split
into "V", "RA" and "M". To indicate those tokens are not separate words but parts of the same word, a double-hash prefix
is added for "RA" and "M":

```python
>>> print(tokenized_sequence)
['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M']
```

These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding
the sentence to the tokenizer, which leverages the Rust implementation of [🤗 Tokenizers](https://github.com/huggingface/tokenizers) for peak performance.

```python
>>> inputs = tokenizer(sequence)
```

The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. The
token indices are under the key "input_ids":

```python
>>> encoded_sequence = inputs["input_ids"]
>>> print(encoded_sequence)
[101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102]
```

Note that the tokenizer automatically adds "special tokens" (if the associated model relies on them) which are special
IDs the model sometimes uses.

If we decode the previous sequence of IDs,

```python
>>> decoded_sequence = tokenizer.decode(encoded_sequence)
```

we will see

```python
>>> print(decoded_sequence)
[CLS] A Titan RTX has 24GB of VRAM [SEP]
```

because this is the way a [`BertModel`] is going to expect its inputs.
### Attention mask

The attention mask is an optional argument used when batching sequences together.

<Youtube id="M6adb1j2jPI"/>

This argument indicates to the model which tokens should be attended to, and which should not.

For example, consider these two sequences:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

>>> sequence_a = "This is a short sequence."
>>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A."

>>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"]
>>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"]
```

The encoded versions have different lengths:

```python
>>> len(encoded_sequence_a), len(encoded_sequence_b)
(8, 19)
```

Therefore, we can't put them together in the same tensor as-is. The first sequence needs to be padded up to the length
of the second one, or the second one needs to be truncated down to the length of the first one.

In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask
it to pad like this:

```python
>>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True)
```

We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:

```python
>>> padded_sequences["input_ids"]
[[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]]
```

This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating the
position of the padded indices so that the model does not attend to them. For the [`BertTokenizer`],
`1` indicates a value that should be attended to, while `0` indicates a padded value. This attention mask is
in the dictionary returned by the tokenizer under the key "attention_mask":

```python
>>> padded_sequences["attention_mask"]
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
```
### Token Type IDs

Some models are designed to classify pairs of sentences or to do question answering.

<Youtube id="0u3ioSwev3s"/>

These require two different sequences to be joined in a single "input_ids" entry, which is usually performed with the
help of special tokens, such as the classifier (`[CLS]`) and separator (`[SEP]`) tokens. For example, the BERT
model builds its two sequence input as such:

```python
>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]
```

We can use our tokenizer to automatically generate such an input by passing the two sequences to `tokenizer` as two
arguments (and not a list, like before) like this:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> sequence_a = "HuggingFace is based in NYC"
>>> sequence_b = "Where is HuggingFace based?"

>>> encoded_dict = tokenizer(sequence_a, sequence_b)
>>> decoded = tokenizer.decode(encoded_dict["input_ids"])
```

which will return:

```python
>>> print(decoded)
[CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]
```

This is enough for some models to understand where one sequence ends and where another begins. However, other models,
such as BERT, also deploy token type IDs (also called segment IDs). They are represented as a binary mask identifying
the two types of sequence in the model.

The tokenizer returns this mask as the "token_type_ids" entry:

```python
>>> encoded_dict["token_type_ids"]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```

The first sequence, the "context" used for the question, has all its tokens represented by a `0`, whereas the
second sequence, corresponding to the "question", has all its tokens represented by a `1`.

Some models, like [`XLNetModel`], use an additional token represented by a `2`.
### Position IDs

Contrary to RNNs that have the position of each token embedded within them, transformers are unaware of the position of
each token. Therefore, the position IDs (`position_ids`) are used by the model to identify each token's position in
the list of tokens.

They are an optional parameter. If no `position_ids` are passed to the model, the IDs are automatically created as
absolute positional embeddings.

Absolute positional embeddings are selected in the range `[0, config.max_position_embeddings - 1]`. Some models use
other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.
### Labels

The labels are an optional argument which can be passed in order for the model to compute the loss itself. These labels
should be the expected prediction of the model: it will use a standard loss to compute the loss between its
predictions and the expected value (the label).

These labels are different according to the model head, for example:

- For sequence classification models (e.g., [`BertForSequenceClassification`]), the model expects a
  tensor of dimension `(batch_size)` with each value of the batch corresponding to the expected label of the
  entire sequence.
- For token classification models (e.g., [`BertForTokenClassification`]), the model expects a tensor
  of dimension `(batch_size, seq_length)` with each value corresponding to the expected label of each individual
  token.
- For masked language modeling (e.g., [`BertForMaskedLM`]), the model expects a tensor of dimension
  `(batch_size, seq_length)` with each value corresponding to the expected label of each individual token: the
  labels being the token ID for the masked token, and values to be ignored for the rest (usually -100).
- For sequence to sequence tasks (e.g., [`BartForConditionalGeneration`],
  [`MBartForConditionalGeneration`]), the model expects a tensor of dimension `(batch_size, tgt_seq_length)` with each value corresponding to the target sequences associated with each input sequence. During
  training, both *BART* and *T5* will make the appropriate *decoder_input_ids* and decoder attention masks internally.
  They usually do not need to be supplied. This does not apply to models leveraging the Encoder-Decoder framework. See
  the documentation of each model for more information on each specific model's labels.

The base models (e.g., [`BertModel`]) do not accept labels, as these are the base transformer
models, simply outputting features.
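As a brief sketch of the sequence classification case (the checkpoint and label value are placeholders; the classification head is freshly initialized, so the loss value will vary):

```python
>>> import torch
>>> from transformers import BertForSequenceClassification, BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> model = BertForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

>>> inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
>>> labels = torch.tensor([1])  # one expected label for the entire sequence
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss  # the model computes the loss itself when labels are passed
```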
### Decoder input IDs

This input is specific to encoder-decoder models, and contains the input IDs that will be fed to the decoder. These
inputs should be used for sequence to sequence tasks, such as translation or summarization, and are usually built in a
way specific to each model.

Most encoder-decoder models (BART, T5) create their `decoder_input_ids` on their own from the `labels`. In
such models, passing the `labels` is the preferred way to handle training.

Please check each model's docs to see how they handle these input IDs for sequence to sequence training.
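For illustration, a minimal sketch with T5, where passing `labels` lets the model build the `decoder_input_ids` internally (the `t5-small` checkpoint is used purely as an example):

```python
>>> from transformers import T5ForConditionalGeneration, T5Tokenizer

>>> tokenizer = T5Tokenizer.from_pretrained("t5-small")
>>> model = T5ForConditionalGeneration.from_pretrained("t5-small")

>>> inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
>>> labels = tokenizer("Das Haus ist wunderbar.", return_tensors="pt").input_ids

>>> # No decoder_input_ids are supplied: the model creates them by shifting the labels.
>>> outputs = model(input_ids=inputs.input_ids, labels=labels)
>>> loss = outputs.loss
```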
### Feed Forward Chunking

In each residual attention block in transformers, the self-attention layer is usually followed by 2 feed forward layers.
The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g., for
`bert-base-uncased`).

For an input of size `[batch_size, sequence_length]`, the memory required to store the intermediate feed forward
embeddings `[batch_size, sequence_length, config.intermediate_size]` can account for a large fraction of the memory
use. The authors of [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) noticed that since the
computation is independent of the `sequence_length` dimension, it is mathematically equivalent to compute the output
embeddings of both feed forward layers `[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n`
individually and concatenate them afterwards to `[batch_size, sequence_length, config.hidden_size]` with `n = sequence_length`, which trades increased computation time against reduced memory use, but yields a mathematically
**equivalent** result.

For models employing the function [`apply_chunking_to_forward`], the `chunk_size` defines the
number of output embeddings that are computed in parallel and thus defines the trade-off between memory and time
complexity. If `chunk_size` is set to 0, no feed forward chunking is done.
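A small sketch of this trade-off, assuming `apply_chunking_to_forward` is imported from `transformers.pytorch_utils` and using arbitrary layer sizes:

```python
>>> import torch
>>> from torch import nn
>>> from transformers.pytorch_utils import apply_chunking_to_forward

>>> dense_in, dense_out = nn.Linear(256, 1024), nn.Linear(1024, 256)

>>> def feed_forward(hidden_states):
...     return dense_out(torch.relu(dense_in(hidden_states)))

>>> hidden_states = torch.randn(2, 512, 256)  # (batch_size, sequence_length, hidden_size)
>>> # chunk_size=128 processes 128 positions of dim 1 at a time, reducing peak memory
>>> output = apply_chunking_to_forward(feed_forward, 128, 1, hidden_states)
>>> output.shape
torch.Size([2, 512, 256])
```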
@ -7,6 +7,10 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Hyperparameter Search using Trainer API

@ -50,7 +54,7 @@ For optuna, see optuna [object_parameter](https://optuna.readthedocs.io/en/stabl
... }
```

For raytune, see raytune [object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html); it looks like the following:

```py
>>> def ray_hp_space(trial):
@ -117,4 +121,4 @@ You could define your own compute_objective function, if not defined, the defaul
```

## Hyperparameter search for DDP finetune
Currently, hyperparameter search for DDP is enabled for optuna and sigopt. Only the rank-zero process will generate the search trial and pass the argument to other ranks.
docs/source/en/index.md (new file, 480 lines)
@ -0,0 +1,480 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->
# 🤗 Transformers

State-of-the-art Machine Learning for [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/), and [JAX](https://jax.readthedocs.io/en/latest/).

🤗 Transformers provides APIs and tools to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you the time and resources required to train a model from scratch. These models support common tasks in different modalities, such as:

📝 **Natural Language Processing**: text classification, named entity recognition, question answering, language modeling, summarization, translation, multiple choice, and text generation.<br>
🖼️ **Computer Vision**: image classification, object detection, and segmentation.<br>
🗣️ **Audio**: automatic speech recognition and audio classification.<br>
🐙 **Multimodal**: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.

🤗 Transformers supports framework interoperability between PyTorch, TensorFlow, and JAX. This provides the flexibility to use a different framework at each stage of a model's life; train a model in three lines of code in one framework, and load it for inference in another. Models can also be exported to formats like ONNX and TorchScript for deployment in production environments.
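For example, a hedged sketch of that interoperability, loading PyTorch weights into a TensorFlow architecture (the local path is a placeholder for a directory saved with PyTorch weights):

```python
>>> from transformers import TFAutoModelForSequenceClassification

>>> # from_pt=True converts the PyTorch checkpoint on the fly while loading
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./my-pytorch-model", from_pt=True)
```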
Join the growing community on the [Hub](https://huggingface.co/models), [forum](https://discuss.huggingface.co/), or [Discord](https://discord.com/invite/JfAtkvEtRb) today!

## If you are looking for custom support from the Hugging Face team

<a target="_blank" href="https://huggingface.co/support">
    <img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="width: 100%; max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
</a>
## Contents

The documentation is organized into five sections:

- **GET STARTED** provides a quick tour of the library and installation instructions to get up and running.
- **TUTORIALS** are a great place to start if you're a beginner. This section will help you gain the basic skills you need to start using the library.
- **HOW-TO GUIDES** show you how to achieve a specific goal, like finetuning a pretrained model for language modeling or how to write and share a custom model.
- **CONCEPTUAL GUIDES** offer more discussion and explanation of the underlying concepts and ideas behind models, tasks, and the design philosophy of 🤗 Transformers.
- **API** describes all classes and functions:

  - **MAIN CLASSES** details the most important classes like configuration, model, tokenizer, and pipeline.
  - **MODELS** details the classes and functions related to each model implemented in the library.
  - **INTERNAL HELPERS** details utility classes and functions used internally.
### Supported models

<!--This list is updated automatically from the README with _make fix-copies_. Do not update manually! -->
1. **[ALBERT](model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
1. **[ALIGN](model_doc/align)** (from Google Research) released with the paper [Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision](https://arxiv.org/abs/2102.05918) by Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, Tom Duerig.
1. **[AltCLIP](model_doc/altclip)** (from BAAI) released with the paper [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) by Chen, Zhongzhi and Liu, Guang and Zhang, Bo-Wen and Ye, Fulong and Yang, Qinghong and Wu, Ledell.
1. **[Audio Spectrogram Transformer](model_doc/audio-spectrogram-transformer)** (from MIT) released with the paper [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
1. **[Autoformer](model_doc/autoformer)** (from Tsinghua University) released with the paper [Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting](https://arxiv.org/abs/2106.13008) by Haixu Wu, Jiehui Xu, Jianmin Wang, Mingsheng Long.
1. **[Bark](model_doc/bark)** (from Suno) released in the repository [suno-ai/bark](https://github.com/suno-ai/bark) by Suno AI team.
1. **[BART](model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
1. **[BARThez](model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
1. **[BARTpho](model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
1. **[BEiT](model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei.
1. **[BERT](model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
1. **[BERT For Sequence Generation](model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[BERTweet](model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
1. **[BigBird-Pegasus](model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BigBird-RoBERTa](model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BioGpt](model_doc/biogpt)** (from Microsoft Research AI4Science) released with the paper [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu.
1. **[BiT](model_doc/bit)** (from Google AI) released with the paper [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby.
1. **[Blenderbot](model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BLIP](model_doc/blip)** (from Salesforce) released with the paper [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) by Junnan Li, Dongxu Li, Caiming Xiong, Steven Hoi.
1. **[BLIP-2](model_doc/blip-2)** (from Salesforce) released with the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
1. **[BLOOM](model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[BridgeTower](model_doc/bridgetower)** (from Harbin Institute of Technology/Microsoft Research Asia/Intel Labs) released with the paper [BridgeTower: Building Bridges Between Encoders in Vision-Language Representation Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan.
1. **[ByT5](model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
1. **[CamemBERT](model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CANINE](model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[Chinese-CLIP](model_doc/chinese_clip)** (from OFA-Sys) released with the paper [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
1. **[CLAP](model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CodeGen](model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[ConvNeXTV2](model_doc/convnextv2)** (from Facebook AI) released with the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, Saining Xie.
1. **[CPM](model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CPM-Ant](model_doc/cpmant)** (from OpenBMB) released by the [OpenBMB](https://www.openbmb.org/).
1. **[CTRL](model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[CvT](model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
1. **[Data2Vec](model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
1. **[DeBERTa](model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[DeBERTa-v2](model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[Decision Transformer](model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
1. **[Deformable DETR](model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
1. **[DePlot](model_doc/deplot)** (from Google AI) released with the paper [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) by Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun.
1. **[DETA](model_doc/deta)** (from The University of Texas at Austin) released with the paper [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
1. **[DiNAT](model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi.
1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
1. **[Donut](model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
1. **[DPT](model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
1. **[EfficientFormer](model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren.
1. **[EfficientNet](model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le.
1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EnCodec](model_doc/encodec)** (from Meta AI) released with the paper [High Fidelity Neural Audio Compression](https://arxiv.org/abs/2210.13438) by Alexandre Défossez, Jade Copet, Gabriel Synnaeve, Yossi Adi.
1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ErnieM](model_doc/ernie_m)** (from Baidu) released with the paper [ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora](https://arxiv.org/abs/2012.15674) by Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang.
1. **[ESM](model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2 and ESMFold** were released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[Falcon](model_doc/falcon)** (from Technology Innovation Institute) by Almazrouei, Ebtesam and Alobeidli, Hamza and Alshamsi, Abdulaziz and Cappelli, Alessandro and Cojocaru, Ruxandra and Debbah, Merouane and Goffinet, Etienne and Heslow, Daniel and Launay, Julien and Malartic, Quentin and Noune, Badreddine and Pannier, Baptiste and Penedo, Guilherme.
1. **[FLAN-T5](model_doc/flan-t5)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
1. **[FLAN-UL2](model_doc/flan-ul2)** (from Google AI) released in the repository [google-research/t5x](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-ul2-checkpoints) by Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei
1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
1. **[FocalNet](model_doc/focalnet)** (from Microsoft Research) released with the paper [Focal Modulation Networks](https://arxiv.org/abs/2203.11926) by Jianwei Yang, Chunyuan Li, Xiyang Dai, Lu Yuan, Jianfeng Gao.
1. **[Funnel Transformer](model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1. **[GIT](model_doc/git)** (from Microsoft Research) released with the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Jianfeng Wang, Zhengyuan Yang, Xiaowei Hu, Linjie Li, Kevin Lin, Zhe Gan, Zicheng Liu, Ce Liu, Lijuan Wang.
1. **[GLPN](model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX](model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
1. **[GPT NeoX Japanese](model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
1. **[GPT-2](model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
1. **[GPT-J](model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
1. **[GPT-Sw3](model_doc/gpt-sw3)** (from AI-Sweden) released with the paper [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren.
1. **[GPTBigCode](model_doc/gpt_bigcode)** (from BigCode) released with the paper [SantaCoder: don't reach for the stars!](https://arxiv.org/abs/2301.03988) by Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, Logesh Kumar Umapathi, Carolyn Jane Anderson, Yangtian Zi, Joel Lamy Poirier, Hailey Schoelkopf, Sergey Troshin, Dmitry Abulkhanov, Manuel Romero, Michael Lappert, Francesco De Toni, Bernardo García del Río, Qian Liu, Shamik Bose, Urvashi Bhattacharyya, Terry Yue Zhuo, Ian Yu, Paulo Villegas, Marco Zocca, Sourab Mangrulkar, David Lansky, Huu Nguyen, Danish Contractor, Luis Villa, Jia Li, Dzmitry Bahdanau, Yacine Jernite, Sean Hughes, Daniel Fried, Arjun Guha, Harm de Vries, Leandro von Werra.
1. **[GPTSAN-japanese](model_doc/gptsan-japanese)** released in the repository [tanreinama/GPTSAN](https://github.com/tanreinama/GPTSAN/blob/main/report/model.md) by Toshiyuki Sakamoto (tanreinama).
1. **[Graphormer](model_doc/graphormer)** (from Microsoft) released with the paper [Do Transformers Really Perform Bad for Graph Representation?](https://arxiv.org/abs/2106.05234) by Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu.
1. **[GroupViT](model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
1. **[Hubert](model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
1. **[I-BERT](model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
1. **[ImageGPT](model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
1. **[Informer](model_doc/informer)** (from Beihang University, UC Berkeley, Rutgers University, SEDD Company) released with the paper [Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting](https://arxiv.org/abs/2012.07436) by Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai Zhang.
1. **[InstructBLIP](model_doc/instructblip)** (from Salesforce) released with the paper [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
1. **[Jukebox](model_doc/jukebox)** (from OpenAI) released with the paper [Jukebox: A Generative Model for Music](https://arxiv.org/pdf/2005.00341.pdf) by Prafulla Dhariwal, Heewoo Jun, Christine Payne, Jong Wook Kim, Alec Radford, Ilya Sutskever.
1. **[LayoutLM](model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LayoutLMv2](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
1. **[LayoutLMv3](model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
1. **[LayoutXLM](model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[Longformer](model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[LXMERT](model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
1. **[M-CTC-T](model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MatCha](model_doc/matcha)** (from Google AI) released with the paper [MatCha: Enhancing Visual Language Pretraining with Math Reasoning and Chart Derendering](https://arxiv.org/abs/2212.09662) by Fangyu Liu, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Yasemin Altun, Nigel Collier, Julian Martin Eisenschlos.
1. **[mBART](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[MEGA](model_doc/mega)** (from Meta/USC/CMU/SJTU) released with the paper [Mega: Moving Average Equipped Gated Attention](https://arxiv.org/abs/2209.10655) by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, Jonathan May, and Luke Zettlemoyer.
1. **[Megatron-BERT](model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[mLUKE](model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileNetV1](model_doc/mobilenet_v1)** (from Google Inc.) released with the paper [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.
1. **[MobileNetV2](model_doc/mobilenet_v2)** (from Google Inc.) released with the paper [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen.
1. **[MobileViT](model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MobileViTV2](model_doc/mobilevitv2)** (from Apple) released with the paper [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MRA](model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.
1. **[MT5](model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MusicGen](model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez.
1. **[MVP](model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[NAT](model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi.
1. **[Nezha](model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[NLLB-MOE](model_doc/nllb-moe)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OneFormer](model_doc/oneformer)** (from SHI Labs) released with the paper [OneFormer: One Transformer to Rule Universal Image Segmentation](https://arxiv.org/abs/2211.06220) by Jitesh Jain, Jiachen Li, MangTik Chiu, Ali Hassani, Nikita Orlov, Humphrey Shi.
1. **[OpenLlama](model_doc/open-llama)** (from [s-JoL](https://huggingface.co/s-JoL)) released in [Open-Llama](https://github.com/s-JoL/Open-Llama).
1. **[OPT](model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
1. **[Perceiver IO](model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[QDQBert](model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
1. **[RAG](model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
1. **[REALM](model_doc/realm)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RegNet](model_doc/regnet)** (from Meta Platforms) released with the paper [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RemBERT](model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoBERTa-PreLayerNorm](model_doc/roberta-prelayernorm)** (from Facebook) released with the paper [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://arxiv.org/abs/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli.
1. **[RoCBert](model_doc/roc_bert)** (from WeChatAI) released with the paper [RoCBert: Robust Chinese Bert with Multimodal Contrastive Pretraining](https://aclanthology.org/2022.acl-long.65.pdf) by Hui Su, Weiwei Shi, Xiaoyu Shen, Xiao Zhou, Tuo Ji, Jiarui Fang, Jie Zhou.
1. **[RoFormer](model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](model_doc/rwkv)** (from Bo Peng) released in the repository [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SegFormer](model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechT5](model_doc/speecht5)** (from Microsoft Research) released with the paper [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205) by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
1. **[SpeechToTextTransformer](model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[SwiftFormer](model_doc/swiftformer)** (from MBZUAI) released with the paper [SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://arxiv.org/abs/2303.15446) by Abdelrahman Shaker, Muhammad Maaz, Hanoona Rasheed, Salman Khan, Ming-Hsuan Yang, Fahad Shahbaz Khan.
1. **[Swin Transformer](model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[Swin2SR](model_doc/swin2sr)** (from University of Würzburg) released with the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte.
1. **[SwitchTransformers](model_doc/switch_transformers)** (from Google) released with the paper [Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961) by William Fedus, Barret Zoph, Noam Shazeer.
1. **[T5](model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[Table Transformer](model_doc/table-transformer)** (from Microsoft Research) released with the paper [PubTables-1M: Towards Comprehensive Table Extraction From Unstructured Documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham.
1. **[TAPAS](model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](model_doc/time_series_transformer)** (from HuggingFace).
1. **[TimeSformer](model_doc/timesformer)** (from Facebook) released with the paper [Is Space-Time Attention All You Need for Video Understanding?](https://arxiv.org/abs/2102.05095) by Gedas Bertasius, Heng Wang, Lorenzo Torresani.
1. **[Trajectory Transformer](model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine.
1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[UL2](model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UMT5](model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UPerNet](model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViT Hybrid](model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[ViTMAE](model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViViT](model_doc/vivit)** (from Google Research) released with the paper [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, Cordelia Schmid.
1. **[Wav2Vec2](model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[X-MOD](model_doc/xmod)** (from Meta AI) released with the paper [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, Mikel Artetxe.
1. **[XGLM](model_doc/xglm)** (from Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLM-V](model_doc/xlm-v)** (from Meta AI) released with the paper [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer, Madian Khabsa.
1. **[XLNet](model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
1. **[YOLOS](model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu.
1. **[YOSO](model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
### Supported frameworks
The table below represents the current support in the library for each of those models, whether they have a Python tokenizer (called "slow") or a "fast" tokenizer backed by the 🤗 Tokenizers library, and whether they have support in Jax (via Flax), PyTorch, and/or TensorFlow.
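
As a quick, editor-added illustration of what the columns mean (a sketch, not part of the auto-generated table below): a model with ✅ in every column, such as BERT, can be loaded with both tokenizer backends and in all three frameworks.

```python
from transformers import AutoTokenizer, BertModel, TFBertModel, FlaxBertModel

# BERT has ✅ in every column: a "slow" Python tokenizer, a "fast" one
# backed by 🤗 Tokenizers, and PyTorch, TensorFlow and Flax model classes.
slow_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
fast_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # fast by default

pt_model = BertModel.from_pretrained("bert-base-uncased")        # PyTorch support
tf_model = TFBertModel.from_pretrained("bert-base-uncased")      # TensorFlow support
flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")  # Flax support
```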
<!--This table is updated automatically from the auto modules with _make fix-copies_. Do not update manually!-->
| Model | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
|:-----------------------------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:|
| ALBERT | ✅ | ✅ | ✅ | ✅ | ✅ |
| ALIGN | ❌ | ❌ | ✅ | ❌ | ❌ |
| AltCLIP | ❌ | ❌ | ✅ | ❌ | ❌ |
| Audio Spectrogram Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| Autoformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| Bark | ❌ | ❌ | ✅ | ❌ | ❌ |
| BART | ✅ | ✅ | ✅ | ✅ | ✅ |
| BEiT | ❌ | ❌ | ✅ | ❌ | ✅ |
| BERT | ✅ | ✅ | ✅ | ✅ | ✅ |
| Bert Generation | ✅ | ❌ | ✅ | ❌ | ❌ |
| BigBird | ✅ | ✅ | ✅ | ❌ | ✅ |
| BigBird-Pegasus | ❌ | ❌ | ✅ | ❌ | ❌ |
| BioGpt | ✅ | ❌ | ✅ | ❌ | ❌ |
| BiT | ❌ | ❌ | ✅ | ❌ | ❌ |
| Blenderbot | ✅ | ✅ | ✅ | ✅ | ✅ |
| BlenderbotSmall | ✅ | ✅ | ✅ | ✅ | ✅ |
| BLIP | ❌ | ❌ | ✅ | ✅ | ❌ |
| BLIP-2 | ❌ | ❌ | ✅ | ❌ | ❌ |
| BLOOM | ❌ | ✅ | ✅ | ❌ | ❌ |
| BridgeTower | ❌ | ❌ | ✅ | ❌ | ❌ |
| CamemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
| CANINE | ✅ | ❌ | ✅ | ❌ | ❌ |
| Chinese-CLIP | ❌ | ❌ | ✅ | ❌ | ❌ |
| CLAP | ❌ | ❌ | ✅ | ❌ | ❌ |
| CLIP | ✅ | ✅ | ✅ | ✅ | ✅ |
| CLIPSeg | ❌ | ❌ | ✅ | ❌ | ❌ |
| CodeGen | ✅ | ✅ | ✅ | ❌ | ❌ |
| Conditional DETR | ❌ | ❌ | ✅ | ❌ | ❌ |
| ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
| ConvNeXT | ❌ | ❌ | ✅ | ✅ | ❌ |
| ConvNeXTV2 | ❌ | ❌ | ✅ | ❌ | ❌ |
| CPM-Ant | ✅ | ❌ | ✅ | ❌ | ❌ |
| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ |
| CvT | ❌ | ❌ | ✅ | ✅ | ❌ |
| Data2VecAudio | ❌ | ❌ | ✅ | ❌ | ❌ |
| Data2VecText | ❌ | ❌ | ✅ | ❌ | ❌ |
| Data2VecVision | ❌ | ❌ | ✅ | ✅ | ❌ |
| DeBERTa | ✅ | ✅ | ✅ | ✅ | ❌ |
| DeBERTa-v2 | ✅ | ✅ | ✅ | ✅ | ❌ |
| Decision Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| Deformable DETR | ❌ | ❌ | ✅ | ❌ | ❌ |
| DeiT | ❌ | ❌ | ✅ | ✅ | ❌ |
| DETA | ❌ | ❌ | ✅ | ❌ | ❌ |
| DETR | ❌ | ❌ | ✅ | ❌ | ❌ |
| DiNAT | ❌ | ❌ | ✅ | ❌ | ❌ |
| DistilBERT | ✅ | ✅ | ✅ | ✅ | ✅ |
| DonutSwin | ❌ | ❌ | ✅ | ❌ | ❌ |
| DPR | ✅ | ✅ | ✅ | ✅ | ❌ |
| DPT | ❌ | ❌ | ✅ | ❌ | ❌ |
| EfficientFormer | ❌ | ❌ | ✅ | ✅ | ❌ |
| EfficientNet | ❌ | ❌ | ✅ | ❌ | ❌ |
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ |
| EnCodec | ❌ | ❌ | ✅ | ❌ | ❌ |
| Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
| ERNIE | ❌ | ❌ | ✅ | ❌ | ❌ |
| ErnieM | ✅ | ❌ | ✅ | ❌ | ❌ |
| ESM | ✅ | ❌ | ✅ | ✅ | ❌ |
| FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ |
| Falcon | ❌ | ❌ | ✅ | ❌ | ❌ |
| FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ |
| FLAVA | ❌ | ❌ | ✅ | ❌ | ❌ |
| FNet | ✅ | ✅ | ✅ | ❌ | ❌ |
| FocalNet | ❌ | ❌ | ✅ | ❌ | ❌ |
| Funnel Transformer | ✅ | ✅ | ✅ | ✅ | ❌ |
| GIT | ❌ | ❌ | ✅ | ❌ | ❌ |
| GLPN | ❌ | ❌ | ✅ | ❌ | ❌ |
| GPT Neo | ❌ | ❌ | ✅ | ❌ | ✅ |
| GPT NeoX | ❌ | ✅ | ✅ | ❌ | ❌ |
| GPT NeoX Japanese | ✅ | ❌ | ✅ | ❌ | ❌ |
| GPT-J | ❌ | ❌ | ✅ | ✅ | ✅ |
| GPT-Sw3 | ✅ | ✅ | ✅ | ✅ | ✅ |
| GPTBigCode | ❌ | ❌ | ✅ | ❌ | ❌ |
| GPTSAN-japanese | ✅ | ❌ | ✅ | ❌ | ❌ |
| Graphormer | ❌ | ❌ | ✅ | ❌ | ❌ |
| GroupViT | ❌ | ❌ | ✅ | ✅ | ❌ |
| Hubert | ❌ | ❌ | ✅ | ✅ | ❌ |
| I-BERT | ❌ | ❌ | ✅ | ❌ | ❌ |
| ImageGPT | ❌ | ❌ | ✅ | ❌ | ❌ |
| Informer | ❌ | ❌ | ✅ | ❌ | ❌ |
| InstructBLIP | ❌ | ❌ | ✅ | ❌ | ❌ |
| Jukebox | ✅ | ❌ | ✅ | ❌ | ❌ |
| LayoutLM | ✅ | ✅ | ✅ | ✅ | ❌ |
| LayoutLMv2 | ✅ | ✅ | ✅ | ❌ | ❌ |
| LayoutLMv3 | ✅ | ✅ | ✅ | ✅ | ❌ |
| LED | ✅ | ✅ | ✅ | ✅ | ❌ |
| LeViT | ❌ | ❌ | ✅ | ❌ | ❌ |
| LiLT | ❌ | ❌ | ✅ | ❌ | ❌ |
| LLaMA | ✅ | ✅ | ✅ | ❌ | ❌ |
| Longformer | ✅ | ✅ | ✅ | ✅ | ❌ |
| LongT5 | ❌ | ❌ | ✅ | ❌ | ✅ |
| LUKE | ✅ | ❌ | ✅ | ❌ | ❌ |
| LXMERT | ✅ | ✅ | ✅ | ✅ | ❌ |
| M-CTC-T | ❌ | ❌ | ✅ | ❌ | ❌ |
| M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ |
| Marian | ✅ | ❌ | ✅ | ✅ | ✅ |
| MarkupLM | ✅ | ✅ | ✅ | ❌ | ❌ |
| Mask2Former | ❌ | ❌ | ✅ | ❌ | ❌ |
| MaskFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
| MaskFormerSwin | ❌ | ❌ | ❌ | ❌ | ❌ |
| mBART | ✅ | ✅ | ✅ | ✅ | ✅ |
| MEGA | ❌ | ❌ | ✅ | ❌ | ❌ |
| Megatron-BERT | ❌ | ❌ | ✅ | ❌ | ❌ |
| MGP-STR | ✅ | ❌ | ✅ | ❌ | ❌ |
| MobileBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
| MobileNetV1 | ❌ | ❌ | ✅ | ❌ | ❌ |
| MobileNetV2 | ❌ | ❌ | ✅ | ❌ | ❌ |
| MobileViT | ❌ | ❌ | ✅ | ✅ | ❌ |
| MobileViTV2 | ❌ | ❌ | ✅ | ❌ | ❌ |
| MPNet | ✅ | ✅ | ✅ | ✅ | ❌ |
| MRA | ❌ | ❌ | ✅ | ❌ | ❌ |
| MT5 | ✅ | ✅ | ✅ | ✅ | ✅ |
| MusicGen | ❌ | ❌ | ✅ | ❌ | ❌ |
| MVP | ✅ | ✅ | ✅ | ❌ | ❌ |
| NAT | ❌ | ❌ | ✅ | ❌ | ❌ |
| Nezha | ❌ | ❌ | ✅ | ❌ | ❌ |
| NLLB-MOE | ❌ | ❌ | ✅ | ❌ | ❌ |
| Nyströmformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| OneFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
| OpenAI GPT | ✅ | ✅ | ✅ | ✅ | ❌ |
| OpenAI GPT-2 | ✅ | ✅ | ✅ | ✅ | ✅ |
| OpenLlama | ❌ | ❌ | ✅ | ❌ | ❌ |
| OPT | ❌ | ❌ | ✅ | ✅ | ✅ |
| OWL-ViT | ❌ | ❌ | ✅ | ❌ | ❌ |
| Pegasus | ✅ | ✅ | ✅ | ✅ | ✅ |
| PEGASUS-X | ❌ | ❌ | ✅ | ❌ | ❌ |
| Perceiver | ✅ | ❌ | ✅ | ❌ | ❌ |
| Pix2Struct | ❌ | ❌ | ✅ | ❌ | ❌ |
| PLBart | ✅ | ❌ | ✅ | ❌ | ❌ |
| PoolFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
| ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
| QDQBert | ❌ | ❌ | ✅ | ❌ | ❌ |
| RAG | ✅ | ❌ | ✅ | ✅ | ❌ |
| REALM | ✅ | ✅ | ✅ | ❌ | ❌ |
| Reformer | ✅ | ✅ | ✅ | ❌ | ❌ |
| RegNet | ❌ | ❌ | ✅ | ✅ | ✅ |
| RemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
| ResNet | ❌ | ❌ | ✅ | ✅ | ✅ |
| RetriBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
| RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
| RoBERTa-PreLayerNorm | ❌ | ❌ | ✅ | ✅ | ✅ |
| RoCBert | ✅ | ❌ | ✅ | ❌ | ❌ |
| RoFormer | ✅ | ✅ | ✅ | ✅ | ✅ |
| RWKV | ❌ | ❌ | ✅ | ❌ | ❌ |
| SAM | ❌ | ❌ | ✅ | ✅ | ❌ |
| SegFormer | ❌ | ❌ | ✅ | ✅ | ❌ |
| SEW | ❌ | ❌ | ✅ | ❌ | ❌ |
| SEW-D | ❌ | ❌ | ✅ | ❌ | ❌ |
| Speech Encoder decoder | ❌ | ❌ | ✅ | ❌ | ✅ |
| Speech2Text | ✅ | ❌ | ✅ | ✅ | ❌ |
| Speech2Text2 | ✅ | ❌ | ❌ | ❌ | ❌ |
| SpeechT5 | ✅ | ❌ | ✅ | ❌ | ❌ |
| Splinter | ✅ | ✅ | ✅ | ❌ | ❌ |
| SqueezeBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
| SwiftFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
| Swin Transformer | ❌ | ❌ | ✅ | ✅ | ❌ |
| Swin Transformer V2 | ❌ | ❌ | ✅ | ❌ | ❌ |
| Swin2SR | ❌ | ❌ | ✅ | ❌ | ❌ |
| SwitchTransformers | ❌ | ❌ | ✅ | ❌ | ❌ |
| T5 | ✅ | ✅ | ✅ | ✅ | ✅ |
| Table Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| TAPAS | ✅ | ❌ | ✅ | ✅ | ❌ |
| Time Series Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| TimeSformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| TimmBackbone | ❌ | ❌ | ❌ | ❌ | ❌ |
| Trajectory Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ |
| TrOCR | ❌ | ❌ | ✅ | ❌ | ❌ |
| TVLT | ❌ | ❌ | ✅ | ❌ | ❌ |
| UMT5 | ❌ | ❌ | ✅ | ❌ | ❌ |
| UniSpeech | ❌ | ❌ | ✅ | ❌ | ❌ |
| UniSpeechSat | ❌ | ❌ | ✅ | ❌ | ❌ |
| UPerNet | ❌ | ❌ | ✅ | ❌ | ❌ |
| VAN | ❌ | ❌ | ✅ | ❌ | ❌ |
| VideoMAE | ❌ | ❌ | ✅ | ❌ | ❌ |
| ViLT | ❌ | ❌ | ✅ | ❌ | ❌ |
| Vision Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
| VisionTextDualEncoder | ❌ | ❌ | ✅ | ✅ | ✅ |
| VisualBERT | ❌ | ❌ | ✅ | ❌ | ❌ |
| ViT | ❌ | ❌ | ✅ | ✅ | ✅ |
| ViT Hybrid | ❌ | ❌ | ✅ | ❌ | ❌ |
| ViTMAE | ❌ | ❌ | ✅ | ✅ | ❌ |
| ViTMSN | ❌ | ❌ | ✅ | ❌ | ❌ |
| ViViT | ❌ | ❌ | ✅ | ❌ | ❌ |
| Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ |
| Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ |
| WavLM | ❌ | ❌ | ✅ | ❌ | ❌ |
| Whisper | ✅ | ✅ | ✅ | ✅ | ✅ |
| X-CLIP | ❌ | ❌ | ✅ | ❌ | ❌ |
| X-MOD | ❌ | ❌ | ✅ | ❌ | ❌ |
| XGLM | ✅ | ✅ | ✅ | ✅ | ✅ |
| XLM | ✅ | ❌ | ✅ | ✅ | ❌ |
| XLM-ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
| XLM-RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
| XLM-RoBERTa-XL | ❌ | ❌ | ✅ | ❌ | ❌ |
| XLNet | ✅ | ✅ | ✅ | ✅ | ❌ |
| YOLOS | ❌ | ❌ | ✅ | ❌ | ❌ |
| YOSO | ❌ | ❌ | ✅ | ❌ | ❌ |
<!-- End table-->
@@ -12,6 +12,10 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Installation
@@ -54,19 +58,31 @@ pip install transformers
 For CPU-support only, you can conveniently install 🤗 Transformers and a deep learning library in one line. For example, install 🤗 Transformers and PyTorch with:
 
 ```bash
-pip install transformers[torch]
+pip install 'transformers[torch]'
 ```
 
 🤗 Transformers and TensorFlow 2.0:
 
 ```bash
-pip install transformers[tf-cpu]
+pip install 'transformers[tf-cpu]'
 ```
 
+<Tip warning={true}>
+
+M1 / ARM Users
+
+You will need to install the following before installing TensorFLow 2.0
+```
+brew install cmake
+brew install pkg-config
+```
+
+</Tip>
+
 🤗 Transformers and Flax:
 
 ```bash
-pip install transformers[flax]
+pip install 'transformers[flax]'
 ```
 
 Finally, check if 🤗 Transformers has been properly installed by running the following command. It will download a pretrained model:
@@ -237,4 +253,4 @@ Once your file is downloaded and locally cached, specify it's local path to load
 
 See the [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) section for more details on downloading files stored on the Hub.
 
 </Tip>
docs/source/en/internal/audio_utils.md (new file, 39 lines)
@@ -0,0 +1,39 @@
+<!--Copyright 2023 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
+-->
+
+# Utilities for `FeatureExtractors`
+
+This page lists all the utility functions that can be used by the audio [`FeatureExtractor`] in order to compute special features from a raw audio using common algorithms such as *Short Time Fourier Transform* or *log mel spectrogram*.
+
+Most of those are only useful if you are studying the code of the audio processors in the library.
+
+## Audio Transformations
+
+[[autodoc]] audio_utils.hertz_to_mel
+
+[[autodoc]] audio_utils.mel_to_hertz
+
+[[autodoc]] audio_utils.mel_filter_bank
+
+[[autodoc]] audio_utils.optimal_fft_length
+
+[[autodoc]] audio_utils.window_function
+
+[[autodoc]] audio_utils.spectrogram
+
+[[autodoc]] audio_utils.power_to_db
+
+[[autodoc]] audio_utils.amplitude_to_db
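
Editor's note: as a rough sketch of how these utilities compose (assuming the `transformers.audio_utils` signatures shipped in this release), a log-mel spectrogram can be computed from a raw waveform like this:

```python
import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

# One second of a 440 Hz tone sampled at 16 kHz, as a stand-in for real audio.
sampling_rate = 16000
waveform = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate)

# 80-bin mel filter bank covering 0 Hz to Nyquist, for 400-point frames (201 rfft bins).
mel_filters = mel_filter_bank(
    num_frequency_bins=201,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)

# Log-mel spectrogram: Hann-windowed STFT, power spectrum, mel projection, log10.
log_mel = spectrogram(
    waveform,
    window=window_function(400, "hann"),
    frame_length=400,
    hop_length=160,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)
print(log_mel.shape)  # (num_mel_filters, num_frames)
```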
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # General Utilities
@@ -8,25 +8,30 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Utilities for Generation
 
-This page lists all the utility functions used by [`~generation_utils.GenerationMixin.generate`],
-[`~generation_utils.GenerationMixin.greedy_search`],
-[`~generation_utils.GenerationMixin.sample`],
-[`~generation_utils.GenerationMixin.beam_search`],
-[`~generation_utils.GenerationMixin.beam_sample`],
-[`~generation_utils.GenerationMixin.group_beam_search`], and
-[`~generation_utils.GenerationMixin.constrained_beam_search`].
+This page lists all the utility functions used by [`~generation.GenerationMixin.generate`],
+[`~generation.GenerationMixin.greedy_search`],
+[`~generation.GenerationMixin.contrastive_search`],
+[`~generation.GenerationMixin.sample`],
+[`~generation.GenerationMixin.beam_search`],
+[`~generation.GenerationMixin.beam_sample`],
+[`~generation.GenerationMixin.group_beam_search`], and
+[`~generation.GenerationMixin.constrained_beam_search`].
 
 Most of those are only useful if you are studying the code of the generate methods in the library.
 
 ## Generate Outputs
 
-The output of [`~generation_utils.GenerationMixin.generate`] is an instance of a subclass of
+The output of [`~generation.GenerationMixin.generate`] is an instance of a subclass of
 [`~utils.ModelOutput`]. This output is a data structure containing all the information returned
-by [`~generation_utils.GenerationMixin.generate`], but that can also be used as tuple or dictionary.
+by [`~generation.GenerationMixin.generate`], but that can also be used as tuple or dictionary.
 
 Here's an example:
@ -40,7 +45,7 @@ inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
|
|||||||
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
|
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
|
||||||
```
|
```
|
||||||
|
|
||||||
The `generation_output` object is a [`~generation_utils.GreedySearchDecoderOnlyOutput`], as we can
|
The `generation_output` object is a [`~generation.GreedySearchDecoderOnlyOutput`], as we can
|
||||||
see in the documentation of that class below, it means it has the following attributes:
|
see in the documentation of that class below, it means it has the following attributes:
|
||||||
|
|
||||||
- `sequences`: the generated sequences of tokens
|
- `sequences`: the generated sequences of tokens
|
||||||
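As a quick editorial illustration (a minimal sketch, not part of the diff above), here is how those attributes are typically accessed, assuming a small causal LM such as `gpt2`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)

# A ModelOutput subclass supports attribute, dict-style, and tuple-style access.
print(generation_output.sequences.shape)  # generated token ids, shape (batch, sequence_length)
print(len(generation_output.scores))      # one logits tensor per generated token
print(generation_output["sequences"] is generation_output.sequences)  # True
```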
@@ -72,31 +77,31 @@ We document here all output types.
 
 ### GreedySearchOutput
 
-[[autodoc]] generation_utils.GreedySearchDecoderOnlyOutput
+[[autodoc]] generation.GreedySearchDecoderOnlyOutput
 
-[[autodoc]] generation_utils.GreedySearchEncoderDecoderOutput
+[[autodoc]] generation.GreedySearchEncoderDecoderOutput
 
-[[autodoc]] generation_flax_utils.FlaxGreedySearchOutput
+[[autodoc]] generation.FlaxGreedySearchOutput
 
 ### SampleOutput
 
-[[autodoc]] generation_utils.SampleDecoderOnlyOutput
+[[autodoc]] generation.SampleDecoderOnlyOutput
 
-[[autodoc]] generation_utils.SampleEncoderDecoderOutput
+[[autodoc]] generation.SampleEncoderDecoderOutput
 
-[[autodoc]] generation_flax_utils.FlaxSampleOutput
+[[autodoc]] generation.FlaxSampleOutput
 
 ### BeamSearchOutput
 
-[[autodoc]] generation_utils.BeamSearchDecoderOnlyOutput
+[[autodoc]] generation.BeamSearchDecoderOnlyOutput
 
-[[autodoc]] generation_utils.BeamSearchEncoderDecoderOutput
+[[autodoc]] generation.BeamSearchEncoderDecoderOutput
 
 ### BeamSampleOutput
 
-[[autodoc]] generation_utils.BeamSampleDecoderOnlyOutput
+[[autodoc]] generation.BeamSampleDecoderOnlyOutput
 
-[[autodoc]] generation_utils.BeamSampleEncoderDecoderOutput
+[[autodoc]] generation.BeamSampleEncoderDecoderOutput
 
 ## LogitsProcessor
 
@@ -115,6 +120,9 @@ generation.
 [[autodoc]] MinLengthLogitsProcessor
     - __call__
 
+[[autodoc]] MinNewTokensLengthLogitsProcessor
+    - __call__
+
 [[autodoc]] TemperatureLogitsWarper
     - __call__
 
@@ -133,6 +141,9 @@ generation.
 [[autodoc]] NoRepeatNGramLogitsProcessor
     - __call__
 
+[[autodoc]] SequenceBiasLogitsProcessor
+    - __call__
+
 [[autodoc]] NoBadWordsLogitsProcessor
     - __call__
 
@@ -261,3 +272,9 @@ A [`Constraint`] can be used to force the generation to include specific tokens
 [[autodoc]] top_k_top_p_filtering
 
 [[autodoc]] tf_top_k_top_p_filtering
+
+## Streamers
+
+[[autodoc]] TextStreamer
+
+[[autodoc]] TextIteratorStreamer
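A minimal editorial sketch (not part of the diff) of the new streamer classes in use, assuming a small causal LM:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The sky is", return_tensors="pt")

# TextStreamer prints decoded tokens to stdout as they are generated.
streamer = TextStreamer(tokenizer, skip_prompt=True)
model.generate(**inputs, streamer=streamer, max_new_tokens=20)
```

`TextIteratorStreamer` follows the same pattern but exposes the generated text through a Python iterator, which is handy when generation runs in another thread.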
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Utilities for Image Processors
 
@@ -19,14 +23,26 @@ Most of those are only useful if you are studying the code of the image processo
 
 ## Image Transformations
 
+[[autodoc]] image_transforms.center_crop
+
+[[autodoc]] image_transforms.center_to_corners_format
+
+[[autodoc]] image_transforms.corners_to_center_format
+
+[[autodoc]] image_transforms.id_to_rgb
+
 [[autodoc]] image_transforms.normalize
 
+[[autodoc]] image_transforms.pad
+
+[[autodoc]] image_transforms.rgb_to_id
+
 [[autodoc]] image_transforms.rescale
 
 [[autodoc]] image_transforms.resize
 
 [[autodoc]] image_transforms.to_pil_image
 
-## ImageProcessorMixin
+## ImageProcessingMixin
 
-[[autodoc]] image_processing_utils.ImageProcessorMixin
+[[autodoc]] image_processing_utils.ImageProcessingMixin
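A rough editorial sketch (not from the diff) of the functional transforms listed above; it assumes a NumPy array in channels-first format, and exact keyword arguments may vary between versions:

```python
import numpy as np
from transformers.image_transforms import center_crop, resize, to_pil_image

image = np.random.randint(0, 256, (3, 480, 640), dtype=np.uint8)  # (channels, height, width)

cropped = center_crop(image, size=(224, 224))  # crop around the image center
resized = resize(image, size=(256, 256))       # resample to a new size
pil_image = to_pil_image(resized)              # hand off to PIL for inspection
```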
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Custom Layers and Utilities
 
@@ -54,9 +58,6 @@ Most of those are only useful if you are studying the code of the models in the
 
 [[autodoc]] modeling_tf_utils.TFConv1D
 
-[[autodoc]] modeling_tf_utils.TFSharedEmbeddings
-    - call
-
 [[autodoc]] modeling_tf_utils.TFSequenceSummary
 
 ## TensorFlow loss functions
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Utilities for pipelines
docs/source/en/internal/time_series_utils.md (new file, 29 lines)
@@ -0,0 +1,29 @@
+<!--Copyright 2023 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
+-->
+
+# Time Series Utilities
+
+This page lists all the utility functions and classes that can be used for time-series-based models.
+
+Most of those are only useful if you are studying the code of the time series models or wish to add to the collection of distributional output classes.
+
+## Distributional Output
+
+[[autodoc]] time_series_utils.NormalOutput
+
+[[autodoc]] time_series_utils.StudentTOutput
+
+[[autodoc]] time_series_utils.NegativeBinomialOutput
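An editorial sketch (not part of the new file) of how a distributional output head is typically used, assuming the gluonts-style projection API these classes expose:

```python
import torch
from transformers.time_series_utils import StudentTOutput

# A Student-T output head for a univariate target.
output = StudentTOutput(dim=1)

# Project 32-dim decoder hidden states to the distribution parameters (df, loc, scale).
projection = output.get_parameter_projection(in_features=32)
distr_args = projection(torch.randn(8, 32))

distribution = output.distribution(distr_args)
samples = distribution.sample()  # one draw per batch element
```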
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Utilities for Tokenizers
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Utilities for Trainer
docs/source/en/main_classes/agent.md (new file, 105 lines)
@@ -0,0 +1,105 @@
+<!--Copyright 2023 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
+-->
+
+# Agents & Tools
+
+<Tip warning={true}>
+
+Transformers Agent is an experimental API which is subject to change at any time. Results returned by the agents
+can vary as the APIs or underlying models are prone to change.
+
+</Tip>
+
+To learn more about agents and tools make sure to read the [introductory guide](../transformers_agents). This page
+contains the API docs for the underlying classes.
+
+## Agents
+
+We provide three types of agents: [`HfAgent`] uses inference endpoints for open-source models, [`LocalAgent`] uses a model of your choice locally and [`OpenAiAgent`] uses OpenAI closed models.
+
+### HfAgent
+
+[[autodoc]] HfAgent
+
+### LocalAgent
+
+[[autodoc]] LocalAgent
+
+### OpenAiAgent
+
+[[autodoc]] OpenAiAgent
+
+### AzureOpenAiAgent
+
+[[autodoc]] AzureOpenAiAgent
+
+### Agent
+
+[[autodoc]] Agent
+    - chat
+    - run
+    - prepare_for_new_chat
+
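A minimal editorial sketch (not part of the new file) of driving an agent, following the pattern from the introductory guide; the endpoint URL is the commonly used StarCoder inference endpoint:

```python
from transformers import HfAgent

agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")

# `run` executes a single task from scratch; `chat` keeps state across calls.
picture = agent.run("Draw me a picture of rivers and lakes")
agent.chat("Transform the picture so that there is a rock in there")
```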
+## Tools
+
+### load_tool
+
+[[autodoc]] load_tool
+
+### Tool
+
+[[autodoc]] Tool
+
+### PipelineTool
+
+[[autodoc]] PipelineTool
+
+### RemoteTool
+
+[[autodoc]] RemoteTool
+
+### launch_gradio_demo
+
+[[autodoc]] launch_gradio_demo
+
+## Agent Types
+
+Agents can handle any type of object in-between tools; tools, being completely multimodal, can accept and return
+text, image, audio, and video, among other types. In order to increase compatibility between tools, as well as to
+correctly render these returns in ipython (jupyter, colab, ipython notebooks, ...), we implement wrapper classes
+around these types.
+
+The wrapped objects should continue behaving as they did initially; a text object should still behave as a string, an image
+object should still behave as a `PIL.Image`.
+
+These types have three specific purposes:
+
+- Calling `to_raw` on the type should return the underlying object
+- Calling `to_string` on the type should return the object as a string: that can be the string in the case of an `AgentText`
+but will be the path of the serialized version of the object in other instances
+- Displaying it in an ipython kernel should display the object correctly
+
+### AgentText
+
+[[autodoc]] transformers.tools.agent_types.AgentText
+
+### AgentImage
+
+[[autodoc]] transformers.tools.agent_types.AgentImage
+
+### AgentAudio
+
+[[autodoc]] transformers.tools.agent_types.AgentAudio
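A tiny editorial sketch of the `to_raw`/`to_string` contract described above (the behavior here is inferred from that description, so treat it as an assumption):

```python
from transformers.tools.agent_types import AgentText

text = AgentText("rivers and lakes")

print(text.to_raw())     # the underlying object; for AgentText, the plain string itself
print(text.to_string())  # string form; for images/audio this would be a serialized file path
print(text.upper())      # the wrapper still behaves like a regular Python str
```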
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Callbacks
 
@@ -37,6 +41,9 @@ By default a [`Trainer`] will use the following callbacks:
 installed.
 - [`~integrations.CodeCarbonCallback`] if [codecarbon](https://pypi.org/project/codecarbon/) is
 installed.
+- [`~integrations.ClearMLCallback`] if [clearml](https://github.com/allegroai/clearml) is installed.
+- [`~integrations.DagsHubCallback`] if [dagshub](https://dagshub.com/) is installed.
+- [`~integrations.FlyteCallback`] if [flyte](https://flyte.org/) is installed.
 
 The main class that implements callbacks is [`TrainerCallback`]. It gets the
 [`TrainingArguments`] used to instantiate the [`Trainer`], can access that
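An editorial sketch of a custom callback (not part of the diff), using the standard `TrainerCallback` hooks:

```python
from transformers import TrainerCallback

class PrintLossCallback(TrainerCallback):
    """Print the loss every time the Trainer logs it."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss = {logs['loss']}")

# Attach it when building the trainer:
# trainer = Trainer(model=model, args=training_args, callbacks=[PrintLossCallback()])
```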
@@ -73,6 +80,12 @@ Here is the list of the available [`TrainerCallback`] in the library:
 
 [[autodoc]] integrations.NeptuneCallback
 
+[[autodoc]] integrations.ClearMLCallback
+
+[[autodoc]] integrations.DagsHubCallback
+
+[[autodoc]] integrations.FlyteCallback
+
 ## TrainerCallback
 
 [[autodoc]] TrainerCallback
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Configuration
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Data Collator
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # DeepSpeed Integration
 
@@ -162,33 +166,24 @@ If after trying everything suggested you still encounter build issues, please, p
 
 ### Deployment with multiple GPUs
 
-To deploy this feature with multiple GPUs adjust the [`Trainer`] command line arguments as
-following:
-
-1. replace `python -m torch.distributed.launch` with `deepspeed`.
-2. add a new argument `--deepspeed ds_config.json`, where `ds_config.json` is the DeepSpeed configuration file as
+To deploy the DeepSpeed integration adjust the [`Trainer`] command line arguments to include a new argument `--deepspeed ds_config.json`, where `ds_config.json` is the DeepSpeed configuration file as
 documented [here](https://www.deepspeed.ai/docs/config-json/). The file naming is up to you.
 
-Therefore, if your original command line looked as follows:
+You can use a launcher of your choice here. You can continue using the pytorch launcher:
 
 ```bash
-python -m torch.distributed.launch --nproc_per_node=2 your_program.py <normal cl args>
+python -m torch.distributed.run --nproc_per_node=2 your_program.py <normal cl args> --deepspeed ds_config.json
 ```
 
-Now it should be:
+or use the launcher provided by `deepspeed`:
 
 ```bash
 deepspeed --num_gpus=2 your_program.py <normal cl args> --deepspeed ds_config.json
 ```
 
-Unlike, `torch.distributed.launch` where you have to specify how many GPUs to use with `--nproc_per_node`, with the
-`deepspeed` launcher you don't have to use the corresponding `--num_gpus` if you want all of your GPUs used. The
+As you can see the arguments aren't the same, but for most needs either of them works. The
 full details on how to configure various nodes and GPUs can be found [here](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node).
 
-In fact, you can continue using `-m torch.distributed.launch` with DeepSpeed as long as you don't need to use
-`deepspeed` launcher-specific arguments. Typically if you don't need a multi-node setup you're not required to use
-the `deepspeed` launcher. But since in the DeepSpeed documentation it'll be used everywhere, for consistency we will
-use it here as well.
+When you use the `deepspeed` launcher and you want to use all available GPUs you can just omit the `--num_gpus` flag.
 
 Here is an example of running `run_translation.py` under DeepSpeed deploying all available GPUs:
@@ -282,6 +277,95 @@ Notes:
 
 
+<a id='deepspeed-multi-node'></a>
+
+### Deployment with multiple Nodes
+
+The information in this section isn't specific to the DeepSpeed integration and is applicable to any multi-node program. But DeepSpeed provides a `deepspeed` launcher that is easier to use than other launchers unless you are in a SLURM environment.
+
+For the duration of this section let's assume that you have 2 nodes with 8 GPUs each. You can reach the first node with `ssh hostname1` and the second node with `ssh hostname2`, and both must be able to reach each other via ssh locally without a password. Of course, you will need to rename these host (node) names to the actual host names you are working with.
+
+#### The torch.distributed.run launcher
+
+For example, to use `torch.distributed.run`, you could do:
+
+```bash
+python -m torch.distributed.run --nproc_per_node=8 --nnode=2 --node_rank=0 --master_addr=hostname1 \
+--master_port=9901 your_program.py <normal cl args> --deepspeed ds_config.json
+```
+
+You have to ssh to each node and run this same command on each one of them! There is no rush, the launcher will wait until both nodes synchronize.
+
+For more information please see [torchrun](https://pytorch.org/docs/stable/elastic/run.html). Incidentally, this is also the launcher that replaced `torch.distributed.launch` a few pytorch versions back.
+
+#### The deepspeed launcher
+
+To use the `deepspeed` launcher instead, you have to first create a `hostfile` file:
+
+```
+hostname1 slots=8
+hostname2 slots=8
+```
+
+and then you can launch it as:
+
+```bash
+deepspeed --num_gpus 8 --num_nodes 2 --hostfile hostfile --master_addr hostname1 --master_port=9901 \
+your_program.py <normal cl args> --deepspeed ds_config.json
+```
+
+Unlike the `torch.distributed.run` launcher, `deepspeed` will automatically launch this command on both nodes!
+
+For more information please see [Resource Configuration (multi-node)](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node).
+
+#### Launching in a SLURM environment
+
+In the SLURM environment the following approach can be used. The following is a slurm script `launch.slurm` which you will need to adapt to your specific SLURM environment.
+
+```bash
+#SBATCH --job-name=test-nodes        # name
+#SBATCH --nodes=2                    # nodes
+#SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
+#SBATCH --cpus-per-task=10           # number of cores per tasks
+#SBATCH --gres=gpu:8                 # number of gpus
+#SBATCH --time 20:00:00              # maximum execution time (HH:MM:SS)
+#SBATCH --output=%x-%j.out           # output file name
+
+export GPUS_PER_NODE=8
+export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+export MASTER_PORT=9901
+
+srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
+ --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
+your_program.py <normal cl args> --deepspeed ds_config.json'
+```
+
+All that is left is to schedule it to run:
+
+```bash
+sbatch launch.slurm
+```
+
+`srun` will take care of launching the program simultaneously on all nodes.
+
+#### Use of Non-shared filesystem
+
+By default DeepSpeed expects that a multi-node environment uses shared storage. If this is not the case and each node can only see the local filesystem, you need to adjust the config file to include a [`checkpoint` section](https://www.deepspeed.ai/docs/config-json/#checkpoint-options) with the following setting:
+
+```json
+{
+  "checkpoint": {
+    "use_node_local_storage": true
+  }
+}
+```
+
+Alternatively, you can also use the [`Trainer`]'s `--save_on_each_node` argument, and the above config will be added automatically for you.
+
 <a id='deepspeed-notebook'></a>
 
 ### Deployment in Notebooks
 
@@ -680,7 +764,7 @@ time. "reuse distance" is a metric we are using to figure out when will a parame
 use the `stage3_max_reuse_distance` to decide whether to throw away the parameter or to keep it. If a parameter is
 going to be used again in the near future (less than `stage3_max_reuse_distance`) then we keep it to reduce communication
 overhead. This is super helpful when you have activation checkpointing enabled, where we do a forward recompute and
-backward passes a a single layer granularity and want to keep the parameter in the forward recompute till the backward
+backward passes at a single layer granularity and want to keep the parameter in the forward recompute till the backward
 
 The following configuration values depend on the model's hidden size:
 
@@ -715,6 +799,39 @@ default value in the following cases:
 the increased data buffers.
 
+
+#### ZeRO-0 Config
+
+Note that we're listing Stage 0 and 1 last since they are rarely used.
+
+Stage 0 is disabling all types of sharding and just using DeepSpeed as DDP. You can turn it on with:
+
+```json
+{
+    "zero_optimization": {
+        "stage": 0
+    }
+}
+```
+
+This will essentially disable ZeRO without you needing to change anything else.
+
+
+#### ZeRO-1 Config
+
+Stage 1 is Stage 2 minus gradient sharding. You can always try it to speed things up a tiny bit by sharding only the optimizer states, with:
+
+```json
+{
+    "zero_optimization": {
+        "stage": 1
+    }
+}
+```
+
 <a id='deepspeed-nvme'></a>
 
 ### NVMe Support
 
@@ -1037,6 +1154,68 @@ values look like, but we highly recommend using the one with multiple `auto` set
 }
 ```
 
+
+#### How to Choose Which ZeRO Stage and Offloads To Use For Best Performance
+
+So now you know there are all these different stages. How to decide which of them to use? This section will attempt to address this question.
+
+In general the following applies:
+
+- Speed-wise (left is faster than right)
+
+Stage 0 (DDP) > Stage 1 > Stage 2 > Stage 2 + offload > Stage 3 > Stage 3 + offloads
+
+- GPU Memory usage-wise (right is more GPU memory efficient than left)
+
+Stage 0 (DDP) < Stage 1 < Stage 2 < Stage 2 + offload < Stage 3 < Stage 3 + offloads
+
+So when you want to get the fastest execution while fitting into a minimal number of GPUs, here is the process you could follow. We start with the fastest approach and if running into GPU OOM we then go to the next slower approach, which will use less GPU memory. And so on and so forth.
+
+First of all set the batch size to 1 (you can always use gradient accumulation for any desired effective batch size).
+
+1. Enable `--gradient_checkpointing 1` (HF Trainer) or directly `model.gradient_checkpointing_enable()` - if OOM then
+2. Try ZeRO stage 2 first - if OOM then
+3. Try ZeRO stage 2 + `offload_optimizer` - if OOM then
+4. Switch to ZeRO stage 3 - if OOM then
+5. Enable `offload_param` to `cpu` - if OOM then
+6. Enable `offload_optimizer` to `cpu` - if OOM then
+7. If you still can't fit a batch size of 1, first check various default values and lower them if you can. For example, if you use `generate` and you don't use a wide search beam make it narrower as it'd take a lot of memory.
+8. Definitely use mixed half-precision over fp32 - so bf16 on Ampere and higher GPUs and fp16 on older GPU architectures.
+9. If you still OOM you could add more hardware or enable ZeRO-Infinity - that is switch the offloads `offload_param` and `offload_optimizer` to `nvme`. You need to make sure it's a very fast NVMe. As an anecdote, I was able to infer BLOOM-176B on a tiny GPU using ZeRO-Infinity except it was extremely slow. But it worked!
+
+You can, of course, work through these steps in reverse by starting with the most GPU memory efficient config and then going backwards. Or try bisecting it.
+
+Once you have your batch size of 1 not leading to OOM, measure your effective throughput.
+
+Next try to increase the batch size to as large as you can, since the higher the batch size the more efficient the GPUs are, as they perform best when the matrices they multiply are huge.
+
+Now the performance optimization game starts. You can turn off some offload features or step down in ZeRO stages and increase/decrease the batch size and again measure your effective throughput. Rinse and repeat until satisfied.
+
+Don't spend forever on it, but if you're about to start a 3-month training - do spend a few days on it to find the most effective throughput-wise setup. So that your training cost will be the lowest and you will finish training faster. In the current crazy-paced ML world, if it takes you an extra month to train something you are likely to miss a golden opportunity. Of course, this is only me sharing an observation and in no way am I trying to rush you. Before beginning to train BLOOM-176B I spent 2 days on this process and was able to increase throughput from 90 to 150 TFLOPs! This effort saved us more than one month of training time.
+
+These notes were written primarily for the training mode, but they should mostly apply for inference as well. For example, during inference Gradient Checkpointing is a no-op since it is only useful during training. Additionally, we found out that if you are doing multi-GPU inference and not using [DeepSpeed-Inference](https://www.deepspeed.ai/tutorials/inference-tutorial/), [Accelerate](https://huggingface.co/blog/bloom-inference-pytorch-scripts) should provide superior performance.
+
+Other quick related performance notes:
+- if you are training something from scratch always try to have tensors with shapes that are divisible by 16 (e.g. hidden size). For batch size try divisible by 2 at least. There are [wave and tile quantization](https://developer.nvidia.com/blog/optimizing-gpu-performance-tensor-cores/) divisibility rules that are hardware-specific if you want to squeeze even higher performance from your GPUs.
+
+### Activation Checkpointing or Gradient Checkpointing
+
+Activation checkpointing and gradient checkpointing are two distinct terms that refer to the same methodology. It's very confusing but this is how it is.
+
+Gradient checkpointing allows one to trade speed for GPU memory, which either allows one to overcome a GPU OOM, or increase their batch size, which often leads to better performance.
+
+HF Transformers models don't know anything about DeepSpeed's activation checkpointing, so if you try to enable that feature in the DeepSpeed config file, nothing will happen.
+
+Therefore you have two ways to take advantage of this very beneficial feature (a sketch of the first option follows this list):
+
+1. If you want to use a HF Transformers model you can do `model.gradient_checkpointing_enable()` or use `--gradient_checkpointing` in the HF Trainer, which will automatically enable this for you. `torch.utils.checkpoint` is used there.
+2. If you write your own model and you want to use DeepSpeed's activation checkpointing you can use the [API prescribed there](https://deepspeed.readthedocs.io/en/latest/activation-checkpointing.html). You can also take the HF Transformers modeling code and replace `torch.utils.checkpoint` with DeepSpeed's API. The latter is more flexible since it allows you to offload the forward activations to CPU memory instead of recalculating them.
+
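An editorial sketch of option 1 (not part of the diff), using APIs that exist in HF Transformers:

```python
from transformers import AutoModelForCausalLM, TrainingArguments

model = AutoModelForCausalLM.from_pretrained("gpt2")

# Option 1a: enable gradient checkpointing directly on the model.
model.gradient_checkpointing_enable()

# Option 1b: let the HF Trainer enable it through its arguments.
args = TrainingArguments(output_dir="out", gradient_checkpointing=True)
```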
 ### Optimizer and Scheduler
 
 As long as you don't enable `offload_optimizer` you can mix and match DeepSpeed and HuggingFace schedulers and
@@ -1118,8 +1297,17 @@ If you want to use another optimizer which is not listed above, you will have to
 }
 ```
 
-Similarly to `AdamW`, you can configure other officially supported optimizers. Just remember that may have different
-config values. e.g. for Adam you will want `weight_decay` around `0.01`.
+Similarly to `AdamW`, you can configure other officially supported optimizers. Just remember that those may have different config values, e.g. for Adam you will want `weight_decay` around `0.01`.
+
+Additionally, offload works best when it's used with DeepSpeed's CPU Adam optimizer. If you want to use a different optimizer with offload, since `deepspeed==0.8.3` you need to also add:
+
+```json
+{
+    "zero_force_ds_cpu_optimizer": false
+}
+```
+
+to the top level configuration.
 
@@ -1316,9 +1504,32 @@ As of `deepspeed==0.6.0` the bf16 support is new and experimental.
 
 If you use [gradient accumulation](#gradient-accumulation) with bf16-enabled, you need to be aware that it'll accumulate gradients in bf16, which may not be what you want due to this format's low precision, as it may lead to a lossy accumulation.
 
+Work is being done to fix that and provide an option to use a higher precision `dtype` (fp16 or fp32).
+
 </Tip>
 
+
+### NCCL Collectives
+
+There is the `dtype` of the training regime and there is a separate `dtype` that is used for communication collectives like various reduction and gathering/scattering operations.
+
+All gather/scatter ops are performed in the same `dtype` the data is in, so if you're using the bf16 training regime it gets gathered in bf16 - gathering is a non-lossy operation.
+
+Various reduce operations can be quite lossy, for example when gradients are averaged across multiple GPUs. If the communications are done in fp16 or bf16 the outcome is likely to be lossy - since when one adds multiple numbers in low precision the result isn't exact. More so with bf16 as it has a lower precision than fp16. Often fp16 is good enough as the loss is minimal when averaging grads which are typically very small. Therefore, by default for half precision training fp16 is used as the default for reduction operations. But you have full control over this functionality, and if you choose you can add a small overhead and ensure that reductions use fp32 as the accumulation dtype, downcasting to the half precision `dtype` you're training in only when the result is ready.
+
+In order to override the default you simply add a new configuration entry:
+
+```json
+{
+    "communication_data_type": "fp32"
+}
+```
+
+The valid values as of this writing are "fp16", "bfp16", "fp32".
+
+Note: ZeRO stage 3 had a bug with regard to the bf16 comm dtype that was fixed in `deepspeed==0.8.1`.
+
 ### apex
 
 To configure apex AMP-like mode set:
@@ -1499,7 +1710,7 @@ fp32_model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
 
 <Tip>
 
-Note, that once `load_state_dict_from_zero_checkpoint` was run, the `model` will no longer be useable in the
+Note, that once `load_state_dict_from_zero_checkpoint` was run, the `model` will no longer be usable in the
 DeepSpeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since
 `model.load_state_dict(state_dict)` will remove all the DeepSpeed magic from it. So do this only at the very end
 of the training.
@@ -2061,6 +2272,32 @@ rank1:
 
 This was a very basic example and you will want to adapt it to your needs.
 
+### `generate` nuances
+
+When using multiple GPUs with ZeRO Stage-3, one has to synchronize the GPUs by calling `generate(..., synced_gpus=True)`. If this is not done and one GPU finishes generating before the others, the whole system will hang, as the remaining GPUs won't be able to receive the shard of weights from the GPU that stopped generating.
+
+Starting from `transformers>=4.28`, if `synced_gpus` isn't explicitly specified, it'll be set to `True` automatically if these conditions are detected. But you can still override the value of `synced_gpus` if needed.
+
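The call pattern in question, as a one-line editorial sketch (not part of the diff):

```python
# Under ZeRO Stage-3 with multiple GPUs, keep all ranks in lockstep during generation.
outputs = model.generate(**inputs, synced_gpus=True)
```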
+## Testing Deepspeed Integration
+
+If you submit a PR that involves the DeepSpeed integration, please note our CircleCI PR CI setup has no GPUs, so we only run tests requiring GPUs on a different CI nightly. Therefore if you get a green CI report in your PR it doesn't mean the DeepSpeed tests pass.
+
+To run the DeepSpeed tests, please run at least:
+
+```
+RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py
+```
+
+If you changed any of the modeling or pytorch examples code, then run the model zoo tests as well. The following will run all DeepSpeed tests:
+
+```
+RUN_SLOW=1 pytest tests/deepspeed
+```
+
 
 ## Main DeepSpeed Resources
 
@@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
 -->
 
 # Feature Extractor
docs/source/en/main_classes/image_processor.md (new file, 34 lines)
@@ -0,0 +1,34 @@
+<!--Copyright 2022 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
+
+-->
+
+# Image Processor
+
+An image processor is in charge of preparing input features for vision models and post-processing their outputs. This includes transformations such as resizing, normalization, and conversion to PyTorch, TensorFlow, Flax and NumPy tensors. It may also include model-specific post-processing such as converting logits to segmentation masks.
+
+## ImageProcessingMixin
+
+[[autodoc]] image_processing_utils.ImageProcessingMixin
+    - from_pretrained
+    - save_pretrained
+
+## BatchFeature
+
+[[autodoc]] BatchFeature
+
+## BaseImageProcessor
+
+[[autodoc]] image_processing_utils.BaseImageProcessor
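An editorial sketch of the typical round trip (not part of the new file; the checkpoint name is just an example):

```python
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")

image = Image.fromarray(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")  # returns a BatchFeature

print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])
```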
Some files were not shown because too many files have changed in this diff.