Mirror of https://github.com/huggingface/transformers.git (synced 2025-11-03 03:14:36 +08:00)

Compare commits: v4.14.1 ... v4.18-rele (991 commits)
| SHA1 | Author | Date |
|---|---|---|
| 31ec2cb2ba | |||
| b9bf91a970 | |||
| b1a7dfe099 | |||
| 2aef4cfe58 | |||
| 8d57c424e0 | |||
| c65633156b | |||
| fb3d0df454 | |||
| ae6a7a763b | |||
| 47c5c05932 | |||
| a2b7d19bd7 | |||
| 0bf18643f4 | |||
| d55fcbcc50 | |||
| b18dfd95e1 | |||
| 208f4c109a | |||
| f553c3ce4c | |||
| 23fc4cba0d | |||
| b33ab4eb59 | |||
| 9fd5e6bbe6 | |||
| 367558b90d | |||
| 4354005291 | |||
| 7ccacdf10f | |||
| 21decb7731 | |||
| 8bf6d28c10 | |||
| 02214cb3cc | |||
| 765bafb8e4 | |||
| 104c065277 | |||
| 1cd2e21d1b | |||
| 6f9d8dc156 | |||
| dad5ca83b2 | |||
| be9474bd35 | |||
| 24a85cca61 | |||
| b9a768b3ff | |||
| 3951b9f390 | |||
| ec4da72fe9 | |||
| 013a7dbe3d | |||
| ad0cba08ea | |||
| 60d27b1f15 | |||
| 53a4d6b115 | |||
| 61ee26a892 | |||
| 2199382dfd | |||
| 823dbf8a41 | |||
| 5fe06b9bdd | |||
| 9947dd077c | |||
| 59a9c83e40 | |||
| afc5a1ea3a | |||
| 483a9450a0 | |||
| 9de70f213e | |||
| bfeff6cc6a | |||
| 5807054bd3 | |||
| e4b234834a | |||
| b808d8a596 | |||
| 05b4c32908 | |||
| 6a4dbba1a3 | |||
| c551addeb0 | |||
| 98939e6aee | |||
| 99a01423b9 | |||
| a8b6443e06 | |||
| 857eb87cc4 | |||
| 81ac45f85c | |||
| a73281e3e4 | |||
| c4deb7b3ae | |||
| c2f8eaf6bc | |||
| ae189ef991 | |||
| d04adc3521 | |||
| 147c816685 | |||
| ffd19ee1de | |||
| 277d49a590 | |||
| 2b483230a1 | |||
| ee18d4d2a9 | |||
| d7c8ce57d4 | |||
| 781af7362b | |||
| 5b40a37bc4 | |||
| 7a9ef8181c | |||
| 0540d1b6c0 | |||
| 875e07a9e3 | |||
| 6358a4c8ec | |||
| 3015d12bfb | |||
| b62ac4d240 | |||
| 86cff21cf6 | |||
| aebca696af | |||
| 45abb37ac9 | |||
| 5216607f8a | |||
| ed31ab3f10 | |||
| 85295621f1 | |||
| c85547af2b | |||
| da936942b0 | |||
| 979b039c89 | |||
| 7ca4633555 | |||
| e0ac72b7bd | |||
| 473709fc76 | |||
| 8049dfa427 | |||
| 925fc57b70 | |||
| 7ecbb9c5e4 | |||
| f6f6866e9e | |||
| c88ff66cc8 | |||
| 342ff6eb41 | |||
| e02f95b229 | |||
| 3dc8242716 | |||
| b320d87ece | |||
| ed2ee373d0 | |||
| aa4c0a86dc | |||
| e231c72906 | |||
| a97f3150c4 | |||
| b473617d63 | |||
| 7fa7408b26 | |||
| 867f3950fa | |||
| 7e7490473e | |||
| 088c1880b7 | |||
| 2b23e0801a | |||
| aa6cfe9c4b | |||
| 70a9bc69a8 | |||
| cae394c8fa | |||
| 4e0f583eea | |||
| 3a0f1684c3 | |||
| 8cbd9b8fb1 | |||
| 9d88be5778 | |||
| f571dc20ac | |||
| 41bfc1e262 | |||
| 23a75a5338 | |||
| 2a27c80063 | |||
| f5e8c9bdea | |||
| 9badcecf69 | |||
| 77c5a80536 | |||
| 5f0d07b36b | |||
| 1cf28da66d | |||
| 029b0d95ed | |||
| aff9bc405a | |||
| c595b6e6a9 | |||
| 8a69e023bf | |||
| 9e8c37dc82 | |||
| 12428f0ef1 | |||
| 1dfc11e9e0 | |||
| bb3a1d345a | |||
| 935330ddfd | |||
| a220f160e0 | |||
| 4975002df5 | |||
| 7135603423 | |||
| eca77f4719 | |||
| 7732148124 | |||
| 62cbd8423b | |||
| 4f6c938342 | |||
| ec3aace0ae | |||
| c30798ec9d | |||
| d49f8d3189 | |||
| a2379b9257 | |||
| 87a9af533c | |||
| 7b262b9692 | |||
| deb61e5f07 | |||
| 7cc2c9c6b0 | |||
| 7865f4d01f | |||
| 0c55d47cde | |||
| df32b5d89b | |||
| 0aac9ba2da | |||
| 9fef668338 | |||
| f9387c948d | |||
| 96cd5bcbb9 | |||
| e226a24f84 | |||
| 6f1727d83a | |||
| 7643b1caa6 | |||
| c77092a5ed | |||
| 4b2774832d | |||
| 5a42bb431e | |||
| fbb454307d | |||
| f0c00d8ca9 | |||
| 94be424308 | |||
| 250b478a2c | |||
| 9ad77affee | |||
| d50f62f2de | |||
| 460f36d352 | |||
| 2afe9cd279 | |||
| 3f0f75e497 | |||
| c6f7ea194b | |||
| abf3cc7064 | |||
| 641e5f3f55 | |||
| f393868073 | |||
| 4ecb022eb1 | |||
| 8bbd41369f | |||
| c36b856580 | |||
| c1af180dfe | |||
| 319cbbe191 | |||
| 0a5ef036e6 | |||
| f466936476 | |||
| 8d7420768c | |||
| ffc319e7b8 | |||
| 277fc2cc78 | |||
| 75c666b4a8 | |||
| d481b6414d | |||
| c03b6e4259 | |||
| fdc2e643c3 | |||
| b25b92ac4f | |||
| 5709a20416 | |||
| ddbc9ae00b | |||
| a6271967c9 | |||
| cb2b0276b6 | |||
| ecb4662d17 | |||
| ec4e421b7d | |||
| 12d1f07770 | |||
| 47cccb5318 | |||
| 8a96b0f10a | |||
| 632ff3c39e | |||
| b6e06c845f | |||
| 1c1e377e99 | |||
| 81643edda5 | |||
| 93d3fd8645 | |||
| 8481ecefbd | |||
| 5a6b3ccd28 | |||
| abd503d939 | |||
| d9b8d1a9f5 | |||
| 7e0d04bed1 | |||
| e1da89ccb8 | |||
| e5101c2e27 | |||
| 25b8f9a85b | |||
| 03c14a515f | |||
| 73f0a5d1f6 | |||
| 76c74b37c1 | |||
| 99e2982f3e | |||
| 2410d0f8ed | |||
| 667b823b89 | |||
| 204c54d411 | |||
| 190994573a | |||
| 09013efdf1 | |||
| 36f8c42519 | |||
| 5bdf3313ef | |||
| 0b8b06185d | |||
| f06c2c2ba1 | |||
| d35e0c6247 | |||
| 8cc925a241 | |||
| 0f35cda459 | |||
| ee27b3d7df | |||
| c2dc89be62 | |||
| 99fd3eb4a5 | |||
| db4dd44ae3 | |||
| ea05d67164 | |||
| 88f7c564f0 | |||
| 16399d6197 | |||
| 015de6f081 | |||
| a23a7c0cd6 | |||
| 366c18f473 | |||
| 79465ac521 | |||
| a78565b7aa | |||
| 4f4e5ddbcb | |||
| 4a353cacb7 | |||
| c1c17bd0b3 | |||
| 611d3a09b2 | |||
| 0d7322c1b7 | |||
| cd4c5c9060 | |||
| e5bc438cc8 | |||
| bcaf566038 | |||
| 8bfd2fb8f0 | |||
| daa4944759 | |||
| 57713443de | |||
| cd1ffb40bf | |||
| 5664d27622 | |||
| 5a386fb05c | |||
| a7aca42fc4 | |||
| 0a057201a9 | |||
| 8f3ea7a1e1 | |||
| e3008c679f | |||
| 6458236181 | |||
| 37793259bb | |||
| e109edf16f | |||
| 0dcdfe8630 | |||
| f86235ad1b | |||
| c1000e703b | |||
| 10cf1ffdbf | |||
| 6db8693086 | |||
| 5493c10ecb | |||
| 6c2f3ed74c | |||
| 37a9fc49f2 | |||
| 3f8360a7b6 | |||
| 97e32b7854 | |||
| d02bd4f333 | |||
| c8c8c114a3 | |||
| 72ae06b904 | |||
| 1d43933fbc | |||
| efd6e9a82a | |||
| 9fd584e544 | |||
| f284aa320d | |||
| e3645fd280 | |||
| 5dbf36bd4e | |||
| 923c35b5c5 | |||
| 9e9f6b8a45 | |||
| 2de99e6c43 | |||
| 802984ad42 | |||
| 6e1e88fd38 | |||
| 20ab1582cf | |||
| 65cf33e7e5 | |||
| 841620684b | |||
| 62b05b6917 | |||
| 9042dfe35c | |||
| 3e9d0f7f59 | |||
| 580dd87c55 | |||
| c1f209dadd | |||
| ae2dd42be5 | |||
| cb5e50c8c2 | |||
| eaed6897da | |||
| a01fe4cd32 | |||
| 7f3d4440d6 | |||
| 5b4c97d09d | |||
| 9442b3ce31 | |||
| 322c8533d7 | |||
| 7e00247fad | |||
| 5d2fed2e8c | |||
| bb69d154c5 | |||
| f7708e1bed | |||
| ecf989ca73 | |||
| 0868fdef85 | |||
| 5b369dc5d8 | |||
| f5741bcd02 | |||
| b6bdb943b2 | |||
| 96ac7549cb | |||
| 6b09328368 | |||
| ba21001f4c | |||
| e66743e6c9 | |||
| 741e49305d | |||
| b7018abf3c | |||
| 19597998f6 | |||
| 10591399d6 | |||
| 2f463effb3 | |||
| 1da84ae02c | |||
| b2a1c994cb | |||
| 8d83ebdf18 | |||
| 6ce11c2c0f | |||
| 0951d31788 | |||
| 0835119bf3 | |||
| 6c9010ef63 | |||
| fde901877a | |||
| 65f9653ed0 | |||
| a69e185074 | |||
| b256f3518d | |||
| 50dd314d93 | |||
| b7fa1e3dee | |||
| 8feede229c | |||
| 1e8f37992f | |||
| 38bce1d4cf | |||
| cec89e1a0e | |||
| e493a3a5e2 | |||
| e7f34ccd4f | |||
| 3ea046995e | |||
| c1aaa43935 | |||
| f4e4ad34cc | |||
| 5b7dcc7342 | |||
| 70203b5937 | |||
| 62d847602a | |||
| ab2f8d12a7 | |||
| 72983303c5 | |||
| f5a080dd10 | |||
| 91fb62d01c | |||
| ea07064a5c | |||
| b19f3e69a0 | |||
| 9879a1d5f0 | |||
| 8b9ae45549 | |||
| 38cc35069c | |||
| c87cfd653c | |||
| e9fa7cd5d7 | |||
| 2596f95e84 | |||
| 1a62b25caf | |||
| 544fd9876b | |||
| 60b81dfa6f | |||
| ef9c3ca348 | |||
| 9932ee4b4b | |||
| e8efaecb87 | |||
| 5c6f57ee75 | |||
| 040c11f6da | |||
| f0aacc140b | |||
| 10b76987fc | |||
| 01485ceec3 | |||
| 89c7d9cfba | |||
| 7ade7c1794 | |||
| 6b104c5bb0 | |||
| b71474895d | |||
| a6e3b17981 | |||
| a7df656f03 | |||
| c0281feb50 | |||
| 9251427c38 | |||
| 742273a52a | |||
| 7c45fe747f | |||
| 3822e4a563 | |||
| 79d28e80b6 | |||
| 6cbfa7bf4c | |||
| b693cbf99c | |||
| 3c4fbc616f | |||
| 7b3bd1f21a | |||
| 439de3f7f9 | |||
| 4cd7ed4b3b | |||
| 39249c9589 | |||
| 3d2242869d | |||
| 89be34c36c | |||
| 130b987880 | |||
| baab5e7cdf | |||
| 96ae92be8c | |||
| 8fd4731072 | |||
| d83d22f578 | |||
| e535c389aa | |||
| 2eb7bb15e7 | |||
| 05c237ea94 | |||
| 6e57a56987 | |||
| 8a133490bf | |||
| 40040727ab | |||
| 4bfe75bd08 | |||
| d1a29078c0 | |||
| b842d7277a | |||
| 6ccfa2170c | |||
| 26426923b7 | |||
| 00eaffc81f | |||
| afca0d5192 | |||
| 286fdc6b3c | |||
| 7ff9d450cd | |||
| c008afea3c | |||
| e064f08150 | |||
| 3f2e636850 | |||
| 54f0db4066 | |||
| 9863f7d228 | |||
| df5a4094a6 | |||
| ddbb485c41 | |||
| 97f9b8a27b | |||
| 410e26c7ad | |||
| e3342edc4e | |||
| 935a76d90d | |||
| 84eaa6acf5 | |||
| 0b5bf6abef | |||
| 0118c4f6a8 | |||
| fd5b05eb81 | |||
| bf1fe32824 | |||
| 8635407bc7 | |||
| 4818bf7aed | |||
| ad0d7d1745 | |||
| 7566734d6f | |||
| 7963578fc5 | |||
| 074645e32a | |||
| b7e292aebd | |||
| cbf4391177 | |||
| 2f0f9038e2 | |||
| ca57b45071 | |||
| 35ecf99cc4 | |||
| d1fcc90abf | |||
| 7f921bcf47 | |||
| bb7949b35a | |||
| 309e87e25e | |||
| c475f3ce2d | |||
| 6336017c15 | |||
| a0e3480699 | |||
| 4c737f0e40 | |||
| d3ae2bd3cf | |||
| 0400b2263d | |||
| 29c10a41d0 | |||
| fecb08c2b8 | |||
| 86636f52a9 | |||
| a1efc82362 | |||
| 3f76bf54ff | |||
| 32f5de10a0 | |||
| 9e71d46455 | |||
| 1b23979736 | |||
| de737866f2 | |||
| a3e607d19e | |||
| 24588c6731 | |||
| f9582c205a | |||
| 05a12a090d | |||
| db57bb2b71 | |||
| 3db2e8f92b | |||
| 2cdb6dbee5 | |||
| c44d3675c2 | |||
| 32295b15a1 | |||
| 2c3fcc647a | |||
| 38bed912e3 | |||
| 0187c6f0ad | |||
| 3956b133b6 | |||
| 142b69f24b | |||
| 86119c1154 | |||
| 5444687f0f | |||
| a63bd3675f | |||
| 2c2a31ffbc | |||
| 60ba48205e | |||
| 3de12906c8 | |||
| 83f45cd656 | |||
| d5083c333f | |||
| ae1f835028 | |||
| 2f2fefd6af | |||
| 68dec6bffd | |||
| f8ff3fad87 | |||
| 416dff736c | |||
| e93763d420 | |||
| 240cc6cbdc | |||
| 57882177be | |||
| 426b96230a | |||
| 92a537d938 | |||
| f84e0dbd2a | |||
| 0e91f885c3 | |||
| f65fe3663a | |||
| 66828a19b1 | |||
| 3a4376d008 | |||
| cdc51ffd27 | |||
| bc3379e12c | |||
| d4692ad161 | |||
| b87c044c79 | |||
| 2d02f7b29b | |||
| bee361c6f1 | |||
| e3d1a8dabc | |||
| 1ddf3c2b74 | |||
| 943e2aa036 | |||
| 1690319217 | |||
| faf4ff5974 | |||
| 2e12b907ae | |||
| a3dbbc3467 | |||
| cdf19c501d | |||
| 28e6155d8a | |||
| 5d8be090e0 | |||
| f45ac11fb3 | |||
| 80f1a59168 | |||
| 7bc4a01cb5 | |||
| 67047b86ce | |||
| 45f56580a7 | |||
| 86a7845c0c | |||
| 9eb7e9ba1d | |||
| e1cbc073bf | |||
| 05a8580964 | |||
| 41168a49ce | |||
| 041fdc4a7e | |||
| e314c19a3f | |||
| 2e11a04337 | |||
| 0f71c29053 | |||
| b090b79022 | |||
| ec15da2445 | |||
| 2b8599b2df | |||
| f52746d004 | |||
| 52d2e6f6e9 | |||
| 4f403ea899 | |||
| 7a32e4722f | |||
| fcb0f74397 | |||
| f15c99fabf | |||
| 2dce350b33 | |||
| 85aee09e9a | |||
| 2f40c728c9 | |||
| 8c03df1010 | |||
| 3fae83d23a | |||
| 7e4844fc2a | |||
| 6cf06d198c | |||
| 45c7b5b1c7 | |||
| c0864d98ba | |||
| 2e8b85f72e | |||
| 3a2ed96714 | |||
| 724e51c6e6 | |||
| 3d5dea9bf0 | |||
| cb7ed6e083 | |||
| e923917cd9 | |||
| 644ec05233 | |||
| c722753afd | |||
| a86ee2261e | |||
| dee17d5676 | |||
| 258480864d | |||
| 315e67404d | |||
| b1ba03e082 | |||
| eed3186b79 | |||
| 2b5603f6ac | |||
| 0113aae5b7 | |||
| f588cf4050 | |||
| 7029240927 | |||
| 9e00566b9b | |||
| 1f60bc46f3 | |||
| 7732d0fe7a | |||
| d923f76203 | |||
| b5c6fdecf0 | |||
| ba3f9a71a1 | |||
| a6885db912 | |||
| fcb4f11c92 | |||
| 077c00c0b2 | |||
| 8406fa6dd5 | |||
| 6a5472a8e1 | |||
| 0acd84f7cb | |||
| 87d08afb16 | |||
| 0fe17f375a | |||
| 552f8d3091 | |||
| ad1d3c4d4b | |||
| 131e258411 | |||
| 6775b211b6 | |||
| 7a1412e12b | |||
| a459f7f97d | |||
| 75b13f82e9 | |||
| 84eec9e6ba | |||
| c47d259241 | |||
| 5f1918a4a8 | |||
| e02bdce791 | |||
| 8ce1330631 | |||
| ac6aa10f23 | |||
| 31be2f45a9 | |||
| bbe9c6981b | |||
| 854a0d526c | |||
| 486260c68e | |||
| 525dbbf84a | |||
| 21dcaec5d5 | |||
| f1a4c4ead5 | |||
| 4f5faaf044 | |||
| 90166121ee | |||
| e2b6e73fa2 | |||
| f5d98da29e | |||
| 71dccd0774 | |||
| 5ec368d79e | |||
| 39b5d1a63a | |||
| 45cac3fade | |||
| c74f3d4c48 | |||
| 13297ac71c | |||
| dd360d58d9 | |||
| 44b21f117b | |||
| 623d8cb475 | |||
| d718c0c3a8 | |||
| 1d94d57546 | |||
| b9418a1d97 | |||
| c157c7e3fd | |||
| d0b5ed110a | |||
| 8e5d4e4906 | |||
| 37800f1365 | |||
| f427e75049 | |||
| 7b8bdd8601 | |||
| 6d585fe0f0 | |||
| d2749cf72e | |||
| 1c9648c457 | |||
| 2ca6268394 | |||
| dc05dd539f | |||
| af5c3329d7 | |||
| d12ae81664 | |||
| d4f201b860 | |||
| 0c17e766cb | |||
| 125a2882b4 | |||
| d984b10335 | |||
| 09f9d07271 | |||
| 6915174e68 | |||
| a5ecbf7348 | |||
| 5a70987301 | |||
| 87918d3221 | |||
| b8810847d0 | |||
| 3385ca2582 | |||
| 7e56ba2864 | |||
| 554d333ece | |||
| 44c7857b87 | |||
| 47df0f2234 | |||
| 7fc6f41d91 | |||
| 282ae123e2 | |||
| d4b3e56d64 | |||
| 38dfb40ae3 | |||
| f624249d8b | |||
| 3254080d45 | |||
| aa19f478ac | |||
| 0094eba363 | |||
| ee5de66349 | |||
| 0f69b924fb | |||
| f380bf2b61 | |||
| e09473a817 | |||
| 16d4acbfdb | |||
| cabd6d26a2 | |||
| c98a6ac211 | |||
| db07956740 | |||
| 297602c7f4 | |||
| d25e25ee2b | |||
| b6b79faa7e | |||
| 6df29ba5e6 | |||
| 507601a5cf | |||
| 4996922b6d | |||
| 8f5d62fdb1 | |||
| 06107541d3 | |||
| c4d1fd77fa | |||
| 2e4559fa37 | |||
| f5db6ce76a | |||
| 0b07230409 | |||
| 628b59e51d | |||
| ca0848b2ff | |||
| 7d45a2e81c | |||
| a81fd35524 | |||
| eab338104d | |||
| f87db5e412 | |||
| c43749289d | |||
| 8f6454bfac | |||
| 2de90beeeb | |||
| fa6dce250f | |||
| ade7371a41 | |||
| 196cce6e9b | |||
| 6beae766ee | |||
| da5ef25db9 | |||
| 9f831bdeaf | |||
| 4df69506a8 | |||
| fc8fc400e3 | |||
| 99a2771189 | |||
| 6292532fd1 | |||
| 19732cc07a | |||
| 5d8b98608c | |||
| 96161ac408 | |||
| 24e2fa1590 | |||
| e79a0faeae | |||
| 0501beb846 | |||
| 637e81752a | |||
| e695470794 | |||
| e65bfc0971 | |||
| 81156d20cd | |||
| 457dd4392b | |||
| 8d6acc6c29 | |||
| e239fc3b0b | |||
| dcaa5100c9 | |||
| c15bb3fe19 | |||
| eac4aecc3d | |||
| 2390b2cf65 | |||
| c972433a85 | |||
| 4bf97415a4 | |||
| b7cb126ccc | |||
| 6ac77534bf | |||
| 80af1048cf | |||
| 4cff3fae11 | |||
| f6253147df | |||
| 7799b6128f | |||
| 11afb709ec | |||
| 3c3cf17a49 | |||
| 95a75a715f | |||
| 833635e259 | |||
| 183ce067e0 | |||
| b4ce313e6c | |||
| 126bddd1ba | |||
| c962c2adbf | |||
| 6c7b68d414 | |||
| d43e308e7f | |||
| 515ed3ad2a | |||
| ad7390636d | |||
| 57820456bd | |||
| 1fc0fa4617 | |||
| f00f22a3e2 | |||
| 4a6a35bc65 | |||
| 08b41b413a | |||
| 85ea462c08 | |||
| e57468b8a8 | |||
| baf1ebe9f0 | |||
| 3fefee9910 | |||
| 80f7296091 | |||
| ac227093e4 | |||
| 691878ee2f | |||
| f4b7420dfe | |||
| 6a3c883c8b | |||
| f778edb739 | |||
| 2a5a384970 | |||
| 842298f84f | |||
| 6d92c429c7 | |||
| 19c217b4b7 | |||
| 5439cda7f0 | |||
| 841d979190 | |||
| 021b52e7a8 | |||
| 653379c094 | |||
| 2708bfa127 | |||
| d1f5ca1afd | |||
| db3503949d | |||
| fe78fe98ca | |||
| 2c335037bd | |||
| e118e085ea | |||
| 1a354d53c4 | |||
| 2085f20901 | |||
| 979ca24e39 | |||
| 7b3d4df47a | |||
| 74bec9865c | |||
| 2ae3be5442 | |||
| 84c60a7b50 | |||
| 6f0a9b41ef | |||
| 497346d07e | |||
| 1144d336b6 | |||
| 531336bbfd | |||
| f6d3fee855 | |||
| 22454ae492 | |||
| b25067d807 | |||
| dea563c943 | |||
| 32090c729f | |||
| 6f8e644f09 | |||
| edd3fce2f7 | |||
| 9a2dabae70 | |||
| 0167edc854 | |||
| 7a787c68c6 | |||
| 669e3c50c9 | |||
| ebc4edfe7a | |||
| 1b730c3d11 | |||
| b212ff9f49 | |||
| 7d9a33fb5c | |||
| 4663c609b9 | |||
| 735d2bb69b | |||
| 51d7ebf260 | |||
| 4aa16fce6c | |||
| 7cbf8429d9 | |||
| c4f7eb124b | |||
| 5f3c57fc84 | |||
| 96881729ce | |||
| 1eb40338ac | |||
| 6e058e84fd | |||
| 3fc221d077 | |||
| 7b83feb50a | |||
| 762416ffa8 | |||
| 74837171ab | |||
| 6950ccec1b | |||
| 9a94bb8e21 | |||
| 97f3beed36 | |||
| 021f2ea987 | |||
| c425d60bb9 | |||
| 6820904454 | |||
| aa0135f2e0 | |||
| 27b819b0e3 | |||
| 68cc4ccde2 | |||
| 1a00863e95 | |||
| 44eaa2b303 | |||
| 57b980a613 | |||
| 16f0b7d72c | |||
| 704d1feca1 | |||
| 68d925195e | |||
| 7480ded658 | |||
| 28e091430e | |||
| 444ea95a80 | |||
| 285131bfb4 | |||
| c4fa908fa9 | |||
| efb35a4107 | |||
| 6ea6266625 | |||
| 68810aa26c | |||
| ca76618d6b | |||
| 9dc8fb2fc7 | |||
| 5cd7086fdb | |||
| 4e3208662e | |||
| ac2c06d492 | |||
| bf0201e184 | |||
| b67fd797be | |||
| c9504b2f50 | |||
| daec528ca9 | |||
| 0554e4d5c5 | |||
| 7ec6aad23d | |||
| 03f8b9c9e0 | |||
| 37bc0b4e53 | |||
| 20f169b523 | |||
| 3e9fdcf019 | |||
| 4fbc924d0a | |||
| 61d18ae035 | |||
| 222c09a635 | |||
| 31838d3e11 | |||
| 84f360e862 | |||
| 9f33116898 | |||
| 20fa9eb035 | |||
| 16b6df6fca | |||
| f21bc4215a | |||
| f012c00ada | |||
| af9cb94974 | |||
| 533624c5a9 | |||
| b2c477fc6d | |||
| 42d57549b8 | |||
| a54961c5f7 | |||
| 9fbf7c87c3 | |||
| 0a03a86813 | |||
| d72343d2b8 | |||
| 768e6c1449 | |||
| 623b4f7c63 | |||
| 5be1242ac0 | |||
| 484e7a441f | |||
| ac224bb079 | |||
| f18c6fa94c | |||
| 1d71227295 | |||
| e36a83d3a3 | |||
| cac877425c | |||
| 794441c379 | |||
| f872f18dca | |||
| 8d187e7feb | |||
| cc406da4de | |||
| 59fb636948 | |||
| 25b8b8a6f2 | |||
| b67f345d00 | |||
| f71fb5c36e | |||
| d2183a46fb | |||
| 83c552d390 | |||
| 5ab87cd4da | |||
| 5a06118b39 | |||
| 9f89fa02ed | |||
| 2e9af29494 | |||
| 443fdaf29f | |||
| ae929dcbbd | |||
| 65cb94ff77 | |||
| e34dd055e9 | |||
| 927f654427 | |||
| 2380136722 | |||
| 857ab55c01 | |||
| 19d37c2dd3 | |||
| 21aecc0971 | |||
| 9e1775dd23 | |||
| 774ed4a027 | |||
| f2ab21833f | |||
| dbac8899fe | |||
| 0b4c3a1a53 | |||
| 38f95d1846 | |||
| d33dc7966a | |||
| 8c2618e6aa | |||
| 8f6373c61c | |||
| e68c3756fe | |||
| 08cb5718ec | |||
| f8a989cfb2 | |||
| c043ce6cfd | |||
| a1392883ce | |||
| d7d60df0ec | |||
| d1ba56d8d8 | |||
| 04cddaf402 | |||
| 600496fa50 | |||
| 1bfa347707 | |||
| f80775df2b | |||
| 1e847b40c0 | |||
| 1c121916f3 | |||
| 10fd4fa1a6 | |||
| 2c5597f6c7 | |||
| b5e2b183af | |||
| e13f72fbff | |||
| 133c5e40c4 | |||
| b2f500256e | |||
| 87e6e4fe5c | |||
| c1138273d4 | |||
| 03885a3f50 | |||
| 501307b58b | |||
| b058490ceb | |||
| 705ca7f21b | |||
| 116829900a | |||
| 415810664b | |||
| 676643c6d6 | |||
| f566c6e3b7 | |||
| fe4197ab11 | |||
| 86b40073e9 | |||
| ee55ea692b | |||
| ef47d4f848 | |||
| 8f2cc1c3ab | |||
| 2d30443cd3 | |||
| 1d651868d6 | |||
| 6b655cc63f | |||
| 6a7b9da2ae | |||
| d8c09c6541 | |||
| 4210579522 | |||
| 355dc0ce67 | |||
| 207594be81 | |||
| b0c7d2ec58 | |||
| fa39ff9fc4 | |||
| 05fa1a7ac1 | |||
| 87a033d9fa | |||
| 13504dcbea | |||
| 1045a36c1f | |||
| 7df4b90c76 | |||
| e37bc579fc | |||
| 17efc806b4 | |||
| 2a56edb321 | |||
| 824fd44fc3 | |||
| c94c1b8967 | |||
| ec3567fe20 | |||
| d0422de563 | |||
| e846a56ca4 | |||
| a6b7b47a39 | |||
| eec9c8bbd7 | |||
| e51c7b5872 | |||
| 27b3031de2 | |||
| 185876392c | |||
| 033c3ed95a | |||
| c075fb7855 | |||
| 5722d05831 | |||
| 0062058399 | |||
| 7ae6f07004 | |||
| 97ec17f73b | |||
| b513ec8bbd | |||
| 7af80f6618 | |||
| 2a33734606 | |||
| b6ec956976 | |||
| c1125dc2ba | |||
| 33f36c869f | |||
| 0940e9b242 | |||
| b37cf7dee4 | |||
| 952a77b05d | |||
| 8a818c26cb | |||
| 1b0ca7d270 | |||
| 1531b31978 | |||
| 3883e3a75e | |||
| cd583bdaa5 | |||
| 281e1fba75 | |||
| 091693b494 | |||
| 84ea427f46 | |||
| c4a96cecbc | |||
| 77d6c826d8 | |||
| 0b4ea79a0c | |||
| ff066119ca | |||
| 95119ad7b0 | |||
| bdbe3df869 | |||
| cbf036f7ae | |||
| c4a0fb5199 | |||
| d194d639ab | |||
| bef1e3e4a0 | |||
| b18d8534ea | |||
| 48463ebb33 | |||
| 2e07180cba | |||
| 465a8b8d10 | |||
| 8ae24e19b2 | |||
| 12e1b4c6df | |||
| 5061a9fd55 | |||
| 8010fda9bf | |||
| 459677aebe | |||
| c40ecfd740 | |||
| 7c9c41f43c |
@ -78,10 +78,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch_and_tf-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install tensorflow_probability
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
@ -99,7 +99,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_torch_and_tf_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -116,10 +116,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch_and_tf-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install tensorflow_probability
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
@ -149,10 +149,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch_and_flax-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.4-{{ checksum "setup.py" }}
|
||||
@ -169,7 +169,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_torch_and_flax_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -186,10 +186,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch_and_flax-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.4-{{ checksum "setup.py" }}
|
||||
@ -217,10 +217,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.4-torch-{{ checksum "setup.py" }}
|
||||
@ -237,7 +237,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_torch_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -253,10 +253,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.4-torch-{{ checksum "setup.py" }}
|
||||
@ -284,7 +284,7 @@ jobs:
|
||||
keys:
|
||||
- v0.4-tf-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]
|
||||
- run: pip install tensorflow_probability
|
||||
@ -304,7 +304,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_tf_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -320,7 +320,7 @@ jobs:
|
||||
keys:
|
||||
- v0.4-tf-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]
|
||||
- run: pip install tensorflow_probability
|
||||
@ -351,7 +351,7 @@ jobs:
|
||||
keys:
|
||||
- v0.4-flax-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[flax,testing,sentencepiece,flax-speech,vision]
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
@ -370,7 +370,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_flax_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -386,7 +386,7 @@ jobs:
|
||||
keys:
|
||||
- v0.4-flax-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[flax,testing,sentencepiece,vision,flax-speech]
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
@ -417,10 +417,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.4-torch-{{ checksum "setup.py" }}
|
||||
@ -437,7 +437,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_pipelines_torch_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -454,10 +454,10 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.10.0+cpu.html
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.4-torch-{{ checksum "setup.py" }}
|
||||
@ -549,7 +549,7 @@ jobs:
|
||||
- v0.4-custom_tokenizers-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[ja,testing,sentencepiece,jieba]
|
||||
- run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy]
|
||||
- run: python -m unidic download
|
||||
- save_cache:
|
||||
key: v0.4-custom_tokenizers-{{ checksum "setup.py" }}
|
||||
@ -557,7 +557,11 @@ jobs:
|
||||
- '~/.cache/pip'
|
||||
- run: |
|
||||
if [ -f test_list.txt ]; then
|
||||
python -m pytest -s --make-reports=tests_custom_tokenizers ./tests/test_tokenization_bert_japanese.py | tee tests_output.txt
|
||||
python -m pytest -s --make-reports=tests_custom_tokenizers ./tests/test_tokenization_bert_japanese.py ./tests/test_tokenization_openai.py | tee tests_output.txt
|
||||
fi
|
||||
- run: |
|
||||
if [ -f test_list.txt ]; then
|
||||
python -m pytest -n 1 tests/test_tokenization_clip.py --dist=loadfile -s --make-reports=tests_tokenization_clip --durations=100 | tee tests_output.txt
|
||||
fi
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
@ -579,7 +583,7 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch_examples-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech]
|
||||
- run: pip install -r examples/pytorch/_tests_requirements.txt
|
||||
@ -614,7 +618,7 @@ jobs:
|
||||
keys:
|
||||
- v0.4-torch_examples-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech]
|
||||
- run: pip install -r examples/pytorch/_tests_requirements.txt
|
||||
@ -662,7 +666,7 @@ jobs:
|
||||
path: ~/transformers/flax_examples_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_examples_flax_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -729,7 +733,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_hub_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -779,7 +783,7 @@ jobs:
|
||||
- v0.4-torch-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[torch,testing,sentencepiece,onnxruntime]
|
||||
- run: pip install .[torch,testing,sentencepiece,onnxruntime,vision]
|
||||
- save_cache:
|
||||
key: v0.4-onnx-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
@ -795,7 +799,7 @@ jobs:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
|
||||
run_tests_onnxruntime_all:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
@ -812,7 +816,7 @@ jobs:
|
||||
- v0.4-torch-{{ checksum "setup.py" }}
|
||||
- v0.4-{{ checksum "setup.py" }}
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[torch,testing,sentencepiece,onnxruntime]
|
||||
- run: pip install .[torch,testing,sentencepiece,onnxruntime,vision]
|
||||
- save_cache:
|
||||
key: v0.4-onnx-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
@ -848,7 +852,7 @@ jobs:
|
||||
- run: isort --check-only examples tests src utils
|
||||
- run: python utils/custom_init_isort.py --check_only
|
||||
- run: flake8 examples tests src utils
|
||||
- run: python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
|
||||
- run: doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
|
||||
|
||||
check_repository_consistency:
|
||||
working_directory: ~/transformers
|
||||
@ -951,7 +955,7 @@ workflow_filters: &workflow_filters
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- main
|
||||
workflows:
|
||||
version: 2
|
||||
build_and_test:
|
||||
@ -978,7 +982,7 @@ workflows:
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- main
|
||||
jobs:
|
||||
- run_examples_torch_all
|
||||
- run_examples_flax_all
|
||||
@ -1000,7 +1004,7 @@ workflows:
|
||||
# filters:
|
||||
# branches:
|
||||
# only:
|
||||
# - master
|
||||
# - main
|
||||
# jobs:
|
||||
# - cleanup-gke-jobs
|
||||
# - run_examples_tpu
|
||||
|
||||
11  .github/ISSUE_TEMPLATE/bug-report.md  (vendored)

@@ -28,12 +28,13 @@ assignees: ''
Models:

- ALBERT, BERT, XLM, DeBERTa, DeBERTa-v2, ELECTRA, MobileBert, SqueezeBert: @LysandreJik
- T5, BART, Marian, Pegasus, EncoderDecoder: @patrickvonplaten
- Blenderbot, MBART: @patil-suraj
- Longformer, Reformer, TransfoXL, XLNet, FNet, BigBird: @patrickvonplaten
- T5, Pegasus, EncoderDecoder: @patrickvonplaten
- Blenderbot, MBART, BART, Marian, Pegasus: @patil-suraj
- Reformer, TransfoXL, XLNet, FNet: @patrickvonplaten
- Longformer, BigBird: @ydshieh
- FSMT: @stas00
- Funnel: @sgugger
- GPT-2, GPT: @patrickvonplaten, @LysandreJik
- GPT-2, GPT: @patil-suraj, @patrickvonplaten, @LysandreJik
- RAG, DPR: @patrickvonplaten, @lhoestq
- TensorFlow: @Rocketknight1
- JAX/Flax: @patil-suraj

@@ -49,7 +50,7 @@ Library:
- Deepspeed: @stas00
- Ray/raytune: @richardliaw, @amogkam
- Text generation: @patrickvonplaten @narsil
- Tokenizers: @LysandreJik
- Tokenizers: @SaulLu
- Trainer: @sgugger
- Pipelines: @Narsil
- Speech: @patrickvonplaten, @anton-l
2  .github/ISSUE_TEMPLATE/feature-request.md  (vendored)

@@ -22,4 +22,4 @@ assignees: ''

<!-- Is there any way that you could help, e.g. by submitting a PR?
Make sure to read the CONTRIBUTING.MD readme:
https://github.com/huggingface/transformers/blob/master/CONTRIBUTING.md -->
https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md -->
6  .github/PULL_REQUEST_TEMPLATE.md  (vendored)

@@ -17,13 +17,13 @@ Fixes # (issue)

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/master/CONTRIBUTING.md#start-contributing-pull-requests),
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
[documentation guidelines](https://github.com/huggingface/transformers/tree/master/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/master/docs#writing-source-documentation).
[documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?
61  .github/workflows/add-model-like.yml  (vendored, Normal file)

@@ -0,0 +1,61 @@
name: Add model like runner

on:
push:
branches:
- main
pull_request:
paths:
- "src/**"
- "tests/**"
- ".github/**"
types: [opened, synchronize, reopened]

jobs:
run_tests_templates_like:
name: "Add new model like template tests"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2

- name: Loading cache.
uses: actions/cache@v2
id: cache
with:
path: ~/.cache/pip
key: v1-tests_model_like
restore-keys: |
v1-tests_model_like-${{ hashFiles('setup.py') }}
v1-tests_model_like

- name: Install dependencies
run: |
pip install --upgrade pip!=21.3
pip install -U click # Click 7 is installed in the environment by default, but we need at least version 8 for Black
sudo apt -y update && sudo apt install -y libsndfile1-dev
pip install .[dev]

- name: Create model files
run: |
transformers-cli add-new-model-like --config_file tests/fixtures/add_distilbert_like_config.json --path_to_repo .
make style
make fix-copies

- name: Run all PyTorch modeling test
run: |
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_new_models tests/bert_new/test_modeling_bert_new.py

- name: Run style changes
run: |
make style && make quality && make repo-consistency

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_new_models/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_new_models_test_reports
path: reports/tests_new_models
145  .github/workflows/build-docker-images.yml  (vendored, Normal file)
@ -0,0 +1,145 @@
|
||||
name: Build docker images (scheduled)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- docker-image*
|
||||
repository_dispatch:
|
||||
schedule:
|
||||
- cron: "0 1 * * *"
|
||||
|
||||
concurrency:
|
||||
group: docker-images-builds
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
latest-docker:
|
||||
name: "Latest PyTorch + TensorFlow [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./docker/transformers-all-latest-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-all-latest-gpu
|
||||
|
||||
latest-torch-deepspeed-docker:
|
||||
name: "Latest PyTorch + DeepSpeed"
|
||||
needs: latest-docker
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-deepspeed-latest-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu
|
||||
|
||||
doc-builder:
|
||||
name: "Doc builder"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./docker/transformers-doc-builder
|
||||
push: true
|
||||
tags: huggingface/transformers-doc-builder
|
||||
|
||||
latest-pytorch:
|
||||
name: "Latest PyTorch [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
needs: latest-torch-deepspeed-docker
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-gpu
|
||||
|
||||
latest-tensorflow:
|
||||
needs: latest-pytorch
|
||||
name: "Latest TensorFlow [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./docker/transformers-tensorflow-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-tensorflow-gpu
|
||||
50  .github/workflows/build_doc_test.yml  (vendored)
@ -1,50 +0,0 @@
|
||||
name: Documentation test build
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "src/**"
|
||||
- "docs/**"
|
||||
- ".github/**"
|
||||
|
||||
jobs:
|
||||
build_and_package:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash -l {0}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: v1-test_build_doc
|
||||
restore-keys: |
|
||||
v1-test_build_doc-${{ hashFiles('setup.py') }}
|
||||
v1-test_build_doc
|
||||
|
||||
- name: Setup environment
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
|
||||
pip install git+https://github.com/huggingface/doc-builder
|
||||
pip install .[dev]
|
||||
|
||||
export TORCH_VERSION=$(python -c "from torch import version; print(version.__version__.split('+')[0])")
|
||||
pip install torch-scatter -f https://data.pyg.org/whl/torch-${TORCH_VERSION}+cpu.html
|
||||
|
||||
pip install torchvision
|
||||
python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
|
||||
|
||||
sudo apt install tesseract-ocr
|
||||
pip install pytesseract
|
||||
pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com
|
||||
|
||||
- name: Make documentation
|
||||
run: |
|
||||
doc-builder build transformers ./docs/source
|
||||
103  .github/workflows/build_documentation.yml  (vendored)
@ -3,97 +3,18 @@ name: Build documentation
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- doc-builder*
|
||||
- v*-release
|
||||
- use_templates
|
||||
|
||||
jobs:
|
||||
build_and_package:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
shell: bash -l {0}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: 'huggingface/doc-builder'
|
||||
path: doc-builder
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: 'huggingface/transformers'
|
||||
path: transformers
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: 'huggingface/notebooks'
|
||||
path: notebooks
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: v1-test_build_doc
|
||||
restore-keys: |
|
||||
v1-test_build_doc-${{ hashFiles('setup.py') }}
|
||||
v1-test_build_doc
|
||||
|
||||
- name: Setup environment
|
||||
run: |
|
||||
sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
|
||||
pip install git+https://github.com/huggingface/doc-builder
|
||||
pip install git+https://github.com/huggingface/transformers#egg=transformers[dev]
|
||||
|
||||
export TORCH_VERSION=$(python -c "from torch import version; print(version.__version__.split('+')[0])")
|
||||
pip install torch-scatter -f https://data.pyg.org/whl/torch-${TORCH_VERSION}+cpu.html
|
||||
|
||||
pip install torchvision
|
||||
python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
|
||||
|
||||
sudo apt install tesseract-ocr
|
||||
pip install pytesseract
|
||||
pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com
|
||||
pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
|
||||
- name: Setup git
|
||||
run: |
|
||||
git config --global user.name "Hugging Face"
|
||||
git config --global user.email transformers@huggingface.co
|
||||
|
||||
cd doc-builder
|
||||
git pull origin main
|
||||
cd ..
|
||||
|
||||
cd notebooks
|
||||
git pull origin master
|
||||
cd ..
|
||||
|
||||
- name: Make documentation
|
||||
run: |
|
||||
doc-builder build transformers transformers/docs/source --build_dir doc-builder/build --notebook_dir notebooks/transformers_doc --clean
|
||||
|
||||
- name: Push to repositories
|
||||
run: |
|
||||
cd doc-builder
|
||||
if [[ `git status --porcelain` ]]; then
|
||||
git add build
|
||||
git commit -m "Updated with commit ${{ github.sha }}"
|
||||
git push origin main
|
||||
else
|
||||
echo "No diff in the documentation."
|
||||
fi
|
||||
cd ..
|
||||
|
||||
cd notebooks
|
||||
if [[ `git status --porcelain` ]]; then
|
||||
git add transformers_doc
|
||||
git commit -m "Updated Transformer doc notebooks with commit ${{ github.sha }}"
|
||||
git push origin master
|
||||
else
|
||||
echo "No diff in the notebooks."
|
||||
fi
|
||||
cd ..
|
||||
build:
|
||||
uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
|
||||
with:
|
||||
commit_sha: ${{ github.sha }}
|
||||
package: transformers
|
||||
notebook_folder: transformers_doc
|
||||
languages: en es
|
||||
secrets:
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
|
||||
17  .github/workflows/build_pr_documentation.yml  (vendored, Normal file)

@@ -0,0 +1,17 @@
name: Build PR Documentation

on:
pull_request:

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
build:
uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
with:
commit_sha: ${{ github.event.pull_request.head.sha }}
pr_number: ${{ github.event.number }}
package: transformers
languages: en es
13  .github/workflows/delete_doc_comment.yml  (vendored, Normal file)

@@ -0,0 +1,13 @@
name: Delete dev documentation

on:
pull_request:
types: [ closed ]


jobs:
delete:
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
with:
pr_number: ${{ github.event.number }}
package: transformers
60  .github/workflows/doctests.yml  (vendored)

@@ -15,28 +15,66 @@ env:
RUN_SLOW: yes
OMP_NUM_THREADS: 16
MKL_NUM_THREADS: 16
PYTEST_TIMEOUT: 600
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
TF_FORCE_GPU_ALLOW_GROWTH: true

jobs:
run_doctests:
runs-on: [self-hosted, docker-gpu, single-gpu]
runs-on: [self-hosted, doc-tests-gpu]
container:
image: pytorch/pytorch:1.9.0-cuda11.1-cudnn8-runtime
image: huggingface/transformers-all-latest-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2

- uses: actions/checkout@v2
- name: NVIDIA-SMI
run: |
nvidia-smi

- name: Install dependencies
- name: GPU visibility
run: |
apt -y update && apt install -y libsndfile1-dev
pip install --upgrade pip
pip install .[dev]
utils/print_env_pt.py
TF_CPP_MIN_LOG_LEVEL=3 python3 -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
TF_CPP_MIN_LOG_LEVEL=3 python3 -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

- name: Prepare files for doctests
run: |
python3 utils/prepare_for_doc_test.py src docs

- name: Run doctests
run: |
pytest --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure
python3 -m pytest -v --make-reports doc_tests_gpu --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure --doctest-glob="*.mdx"

- name: Clean files after doctests
run: |
python3 utils/prepare_for_doc_test.py src docs --remove_new_line

- name: Failure short reports
if: ${{ failure() }}
continue-on-error: true
run: cat reports/doc_tests_gpu/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: doc_tests_gpu_test_reports
path: reports/doc_tests_gpu


send_results:
name: Send results to webhook
runs-on: ubuntu-latest
if: always()
needs: [run_doctests]
steps:
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- name: Send message to Slack
env:
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_DOCS }}
CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_DOCS }}
CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
run: |
pip install slack_sdk
python utils/notification_service_doc_tests.py
10  .github/workflows/model-templates.yml  (vendored)

@@ -3,7 +3,7 @@ name: Model templates runner
on:
push:
branches:
- master
- main
pull_request:
paths:
- "src/**"

@@ -60,16 +60,16 @@ jobs:

- name: Run style changes
run: |
git fetch origin master:master
make style && make quality
git fetch origin main:main
make style && make quality && make repo-consistency

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_templates_failures_short.txt
run: cat reports/tests_templates/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_templates_test_reports
path: reports
path: reports/tests_templates
12  .github/workflows/self-nightly-scheduled.yml  (vendored)

@@ -33,9 +33,10 @@ jobs:

- name: Install dependencies
run: |
apt -y update && apt install -y libsndfile1-dev git
apt -y update && apt install -y libsndfile1-dev git espeak-ng
pip install --upgrade pip
pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
pip install https://github.com/kpu/kenlm/archive/master.zip
pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu113/torch_nightly.html -U

- name: Are GPUs recognized by our DL frameworks

@@ -100,9 +101,10 @@ jobs:

- name: Install dependencies
run: |
apt -y update && apt install -y libsndfile1-dev git
apt -y update && apt install -y libsndfile1-dev git espeak-ng
pip install --upgrade pip
pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
pip install https://github.com/kpu/kenlm/archive/master.zip
pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu113/torch_nightly.html -U

- name: Are GPUs recognized by our DL frameworks

@@ -152,10 +154,11 @@ jobs:

- name: Install dependencies
run: |
apt -y update && apt install -y libaio-dev
apt -y update && apt install -y libaio-dev libsndfile1-dev git espeak-ng
pip install --upgrade pip
pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu113/torch_nightly.html -U
pip install .[testing,deepspeed]
pip install https://github.com/kpu/kenlm/archive/master.zip
pip install git+https://github.com/microsoft/DeepSpeed

- name: Are GPUs recognized by our DL frameworks

@@ -193,11 +196,12 @@ jobs:

- name: Install dependencies
run: |
apt -y update && apt install -y libaio-dev
apt -y update && apt install -y libaio-dev libsndfile1-dev git espeak-ng
pip install --upgrade pip
pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu113/torch_nightly.html -U
rm -rf ~/.cache/torch_extensions/ # shared between conflicting builds
pip install .[testing,fairscale]
pip install https://github.com/kpu/kenlm/archive/master.zip
pip install git+https://github.com/microsoft/DeepSpeed # testing bleeding edge

- name: Are GPUs recognized by our DL frameworks
24  .github/workflows/self-push.yml  (vendored)

@@ -3,7 +3,7 @@ name: Self-hosted runner (push)
on:
push:
branches:
- master
- main
- ci_*
- ci-*
paths:

@@ -31,7 +31,7 @@ jobs:
- name: Install dependencies
run: |
apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git
apt install -y libsndfile1-dev
apt install -y libsndfile1-dev espeak-ng
pip install --upgrade pip
pip install .[sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
pip install https://github.com/kpu/kenlm/archive/master.zip

@@ -82,13 +82,17 @@ jobs:
image: tensorflow/tensorflow:2.4.1-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.7

- name: Install dependencies
run: |
apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git
apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git espeak-ng
pip install --upgrade "jax[cuda111]" -f https://storage.googleapis.com/jax-releases/jax_releases.html
pip install --upgrade pip
pip install .[sklearn,testing,sentencepiece,flax,flax-speech,vision]
pip install https://github.com/kpu/kenlm/archive/master.zip

- name: Launcher docker
uses: actions/checkout@v2

@@ -141,7 +145,7 @@ jobs:
# steps:
# - name: Install dependencies
# run: |
# apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git
# apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git espeak-ng
# pip install --upgrade pip
# pip install .[sklearn,testing,onnxruntime,sentencepiece,tf-speech]
# pip install https://github.com/kpu/kenlm/archive/master.zip

@@ -199,8 +203,8 @@ jobs:
steps:
- name: Install dependencies
run: |
apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git
apt install -y libsndfile1-dev
apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git espeak-ng
apt install -y libsndfile1-dev espeak-ng
pip install --upgrade pip
pip install .[sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
pip install https://github.com/kpu/kenlm/archive/master.zip

@@ -255,7 +259,7 @@ jobs:
# steps:
# - name: Install dependencies
# run: |
# apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git
# apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git espeak-ng
# pip install --upgrade "jax[cuda111]" -f https://storage.googleapis.com/jax-releases/jax_releases.html
# pip install --upgrade pip
# pip install .[sklearn,testing,sentencepiece,flax,flax-speech,vision]

@@ -312,7 +316,7 @@ jobs:
# steps:
# - name: Install dependencies
# run: |
# apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git
# apt -y update && apt install -y software-properties-common && apt -y update && add-apt-repository -y ppa:git-core/ppa && apt -y update && apt install -y git espeak-ng
# pip install --upgrade pip
# pip install .[sklearn,testing,onnxruntime,sentencepiece,tf-speech]
# pip install https://github.com/kpu/kenlm/archive/master.zip

@@ -492,4 +496,4 @@ jobs:

run: |
pip install slack_sdk
python utils/notification_service.py push
python utils/notification_service_deprecated.py push
527  .github/workflows/self-scheduled.yml  vendored
@@ -1,477 +1,254 @@

name: Self-hosted runner (scheduled)

on:
push:
branches:
- multi_ci_*
repository_dispatch:
schedule:
- cron: "0 0 * * *"
- cron: "0 2 * * *"

env:
HF_HOME: /mnt/cache
TRANSFORMERS_IS_CI: yes
OMP_NUM_THREADS: 8
MKL_NUM_THREADS: 8
RUN_SLOW: yes
OMP_NUM_THREADS: 16
MKL_NUM_THREADS: 16
PYTEST_TIMEOUT: 600
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
TF_FORCE_GPU_ALLOW_GROWTH: true
RUN_PT_TF_CROSS_TESTS: 1

jobs:
run_all_tests_torch_gpu:
runs-on: [self-hosted, docker-gpu, single-gpu]
setup:
name: Setup
strategy:
matrix:
machines: [multi-gpu-docker, single-gpu-docker]
runs-on: ${{ matrix.machines }}
container:
image: pytorch/pytorch:1.9.0-cuda11.1-cudnn8-runtime
image: huggingface/transformers-all-latest-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- name: Launcher docker
uses: actions/checkout@v2
- name: Update clone
working-directory: /transformers
run: |
git fetch && git checkout ${{ github.sha }}

- name: Cleanup
working-directory: /transformers
run: |
rm -rf tests/__pycache__
rm -rf reports

- id: set-matrix
name: Identify models to test
working-directory: /transformers/tests
run: |
echo "::set-output name=matrix::$(python3 -c 'import os; x = list(filter(os.path.isdir, os.listdir(os.getcwd()))); x.sort(); print(x)')"

- name: NVIDIA-SMI
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libsndfile1-dev git
pip install --upgrade pip
pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
pip install https://github.com/kpu/kenlm/archive/master.zip

- name: Are GPUs recognized by our DL frameworks
- name: GPU visibility
working-directory: /transformers
run: |
utils/print_env_pt.py
TF_CPP_MIN_LOG_LEVEL=3 python3 -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
TF_CPP_MIN_LOG_LEVEL=3 python3 -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

run_tests_gpu:
name: Model tests
strategy:
fail-fast: false
matrix:
folders: ${{ fromJson(needs.setup.outputs.matrix) }}
machines: [multi-gpu-docker, single-gpu-docker]
runs-on: ${{ matrix.machines }}
container:
image: huggingface/transformers-all-latest-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
- name: Echo folder ${{ matrix.folders }}
run: echo "${{ matrix.folders }}"

- name: Update clone
working-directory: /transformers
run: git fetch && git checkout ${{ github.sha }}

- name: Run all tests on GPU
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_torch_gpu tests
working-directory: /transformers
run: python3 -m pytest -v --make-reports=${{ matrix.machines }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

- name: Failure short reports
if: ${{ failure() }}
continue-on-error: true
run: cat /transformers/reports/${{ matrix.machines }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
run: cat reports/tests_torch_gpu_failures_short.txt
uses: actions/upload-artifact@v2
with:
name: ${{ matrix.machines }}_run_all_tests_gpu_${{ matrix.folders }}_test_reports
path: /transformers/reports/${{ matrix.machines }}_tests_gpu_${{ matrix.folders }}

run_examples_gpu:
name: Examples directory
runs-on: [self-hosted, single-gpu-docker]
container:
image: huggingface/transformers-all-latest-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
- name: Update clone
working-directory: /transformers
run: git fetch && git checkout ${{ github.sha }}

- name: Run examples tests on GPU
if: ${{ always() }}
env:
OMP_NUM_THREADS: 16
MKL_NUM_THREADS: 16
RUN_SLOW: yes
HF_HOME: /mnt/cache
TRANSFORMERS_IS_CI: yes
working-directory: /transformers
run: |
pip install -r examples/pytorch/_tests_requirements.txt
python -m pytest -n 1 -v --dist=loadfile --make-reports=examples_torch_gpu examples
python3 -m pytest -v --make-reports=examples_gpu examples/pytorch

- name: Failure short reports
if: ${{ always() }}
run: cat reports/examples_torch_gpu_failures_short.txt

- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
RUN_PIPELINE_TESTS: yes
run: |
python -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=tests_torch_pipeline_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_pipeline_gpu_failures_short.txt
if: ${{ failure() }}
continue-on-error: true
run: cat /transformers/reports/examples_gpu/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_torch_gpu_test_reports
path: reports
name: run_examples_gpu
path: /transformers/reports/examples_gpu

run_all_tests_flax_gpu:
runs-on: [self-hosted, docker-gpu-test, single-gpu]
run_pipelines_torch_gpu:
name: PyTorch pipelines
strategy:
fail-fast: false
matrix:
machines: [multi-gpu-docker, single-gpu-docker]
runs-on: ${{ matrix.machines }}
container:
image: tensorflow/tensorflow:2.4.1-gpu
image: huggingface/transformers-pytorch-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
- name: Launcher docker
uses: actions/checkout@v2
- name: Update clone
working-directory: /transformers
run: git fetch && git checkout ${{ github.sha }}

- name: NVIDIA-SMI
continue-on-error: true
- name: Run all pipeline tests on GPU
working-directory: /transformers
env:
RUN_PIPELINE_TESTS: yes
run: |
nvidia-smi

- name: Install dependencies
run: |
pip install --upgrade pip
pip install --upgrade "jax[cuda111]" -f https://storage.googleapis.com/jax-releases/jax_releases.html
pip install .[flax,integrations,sklearn,testing,sentencepiece,flax-speech,vision]
pip install https://github.com/kpu/kenlm/archive/master.zip

- name: Are GPUs recognized by our DL frameworks
run: |
python -c "from jax.lib import xla_bridge; print('GPU available:', xla_bridge.get_backend().platform)"
python -c "import jax; print('Number of GPUs available:', len(jax.local_devices()))"

- name: Run all tests on GPU
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_flax_gpu tests
python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machines }}_tests_torch_pipeline_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_flax_gpu_failures_short.txt
if: ${{ failure() }}
continue-on-error: true
run: cat /transformers/reports/${{ matrix.machines }}_tests_torch_pipeline_gpu/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_flax_gpu_test_reports
path: reports
name: ${{ matrix.machines }}_run_tests_torch_pipeline_gpu
path: /transformers/reports/${{ matrix.machines }}_tests_torch_pipeline_gpu

run_all_tests_tf_gpu:
runs-on: [self-hosted, docker-gpu, single-gpu]
run_pipelines_tf_gpu:
name: TensorFlow pipelines
strategy:
fail-fast: false
matrix:
machines: [multi-gpu-docker, single-gpu-docker]
runs-on: ${{ matrix.machines }}
container:
image: tensorflow/tensorflow:2.4.1-gpu
image: huggingface/transformers-tensorflow-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
needs: setup
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: NVIDIA-SMI
- name: Update clone
working-directory: /transformers
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libsndfile1-dev git
pip install --upgrade pip
pip install .[sklearn,testing,onnx,sentencepiece,tf-speech,vision]
pip install https://github.com/kpu/kenlm/archive/master.zip

- name: Are GPUs recognized by our DL frameworks
run: |
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

- name: Run all tests on GPU
env:
TF_NUM_INTEROP_THREADS: 1
TF_NUM_INTRAOP_THREADS: 16
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_tf_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_gpu_failures_short.txt
git fetch && git checkout ${{ github.sha }}

- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
RUN_PIPELINE_TESTS: yes
TF_NUM_INTEROP_THREADS: 1
TF_NUM_INTRAOP_THREADS: 16
run: |
python -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=tests_tf_pipeline_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_pipeline_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_tf_gpu_test_reports
path: reports

run_all_examples_torch_xla_tpu:
runs-on: [self-hosted, docker-tpu-test, tpu-v3-8]
container:
image: gcr.io/tpu-pytorch/xla:nightly_3.8_tpuvm
options: --privileged -v "/lib/libtpu.so:/lib/libtpu.so" -v /mnt/cache/.cache/huggingface:/mnt/cache/ --shm-size 16G
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: Install dependencies
run: |
pip install --upgrade pip
pip install .[testing]

- name: Are TPUs recognized by our DL frameworks
env:
XRT_TPU_CONFIG: localservice;0;localhost:51011
run: |
python -c "import torch_xla.core.xla_model as xm; print(xm.xla_device())"

- name: Run example tests on TPU
env:
XRT_TPU_CONFIG: "localservice;0;localhost:51011"
MKL_SERVICE_FORCE_INTEL: "1" # See: https://github.com/pytorch/pytorch/issues/37377

run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_torch_xla_tpu examples/pytorch/test_xla_examples.py

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_xla_tpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_examples_torch_xla_tpu
path: reports

run_all_tests_torch_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
image: pytorch/pytorch:1.9.0-cuda11.1-cudnn8-runtime
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: NVIDIA-SMI
continue-on-error: true
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libsndfile1-dev git
pip install --upgrade pip
pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
pip install https://github.com/kpu/kenlm/archive/master.zip

- name: Are GPUs recognized by our DL frameworks
run: |
utils/print_env_pt.py

- name: Run all tests on GPU
env:
MKL_SERVICE_FORCE_INTEL: 1
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_torch_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_multi_gpu_failures_short.txt

- name: Run all pipeline tests on GPU
if: ${{ always() }}
working-directory: /transformers
env:
RUN_PIPELINE_TESTS: yes
run: |
python -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=tests_torch_pipeline_multi_gpu tests
python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machines }}_tests_tf_pipeline_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_pipeline_multi_gpu_failures_short.txt
run: |
cat /transformers/reports/${{ matrix.machines }}_tests_tf_pipeline_gpu/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_torch_multi_gpu_test_reports
path: reports

run_all_tests_tf_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
image: tensorflow/tensorflow:2.4.1-gpu
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: NVIDIA-SMI
continue-on-error: true
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libsndfile1-dev git
pip install --upgrade pip
pip install .[sklearn,testing,onnx,sentencepiece,tf-speech,vision]
pip install https://github.com/kpu/kenlm/archive/master.zip

- name: Are GPUs recognized by our DL frameworks
run: |
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

- name: Run all tests on GPU
env:
TF_NUM_INTEROP_THREADS: 1
TF_NUM_INTRAOP_THREADS: 16
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_tf_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_multi_gpu_failures_short.txt

- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
RUN_PIPELINE_TESTS: yes
TF_NUM_INTEROP_THREADS: 1
TF_NUM_INTRAOP_THREADS: 16
run: |
python -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=tests_tf_pipeline_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_pipeline_multi_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_tf_multi_gpu_test_reports
path: reports

# run_all_tests_flax_multi_gpu:
# runs-on: [self-hosted, docker-gpu, multi-gpu]
# container:
# image: tensorflow/tensorflow:2.4.1-gpu
# options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
# steps:
# - name: Launcher docker
# uses: actions/checkout@v2
#
# - name: NVIDIA-SMI
# run: |
# nvidia-smi
#
# - name: Install dependencies
# run: |
# pip install --upgrade pip
# pip install --upgrade "jax[cuda111]" -f https://storage.googleapis.com/jax-releases/jax_releases.html
# pip install .[flax,integrations,sklearn,testing,sentencepiece,flax-speech,vision]
#
# - name: Are GPUs recognized by our DL frameworks
# run: |
# python -c "from jax.lib import xla_bridge; print('GPU available:', xla_bridge.get_backend().platform)"
# python -c "import jax; print('Number of GPUs available:', len(jax.local_devices()))"
#
# - name: Run all tests on GPU
# run: |
# python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_flax_gpu tests
#
# - name: Failure short reports
# if: ${{ always() }}
# run: cat reports/tests_flax_gpu_failures_short.txt
#
# - name: Test suite reports artifacts
# if: ${{ always() }}
# uses: actions/upload-artifact@v2
# with:
# name: run_all_tests_flax_gpu_test_reports
# path: reports
name: ${{ matrix.machines }}_run_tests_tf_pipeline_gpu
path: /transformers/reports/${{ matrix.machines }}_tests_tf_pipeline_gpu

run_all_tests_torch_cuda_extensions_gpu:
runs-on: [self-hosted, docker-gpu, single-gpu]
name: Torch CUDA extension tests
strategy:
fail-fast: false
matrix:
machines: [multi-gpu-docker, single-gpu-docker]
runs-on: ${{ matrix.machines }}
needs: setup
container:
image: nvcr.io/nvidia/pytorch:21.03-py3
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
image: huggingface/transformers-pytorch-deepspeed-latest-gpu
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2
- name: Update clone
working-directory: /workspace/transformers
run: git fetch && git checkout ${{ github.sha }}

- name: NVIDIA-SMI
- name: Re-compile DeepSpeed
working-directory: /workspace
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libaio-dev
pip install --upgrade pip
pip install .[testing,deepspeed]

- name: Are GPUs recognized by our DL frameworks
run: |
utils/print_env_pt.py
pip install deepspeed # installs the deps correctly
rm -rf DeepSpeed
git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

- name: Run all tests on GPU
working-directory: /workspace/transformers
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended
python -m pytest -v --make-reports=${{ matrix.machines }}_tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_gpu_test_reports
path: reports

run_all_tests_torch_cuda_extensions_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
image: nvcr.io/nvidia/pytorch:21.03-py3
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: NVIDIA-SMI
if: ${{ failure() }}
continue-on-error: true
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libaio-dev
pip install --upgrade pip
rm -rf ~/.cache/torch_extensions/ # shared between conflicting builds
pip install .[testing,deepspeed,fairscale]

- name: Are GPUs recognized by our DL frameworks
run: |
utils/print_env_pt.py

- name: Run all tests on GPU
run: |
python -m pytest -n 1 -v --dist=loadfile --make-reports=tests_torch_cuda_extensions_multi_gpu tests/deepspeed tests/extended

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt
run: cat /workspace/transformers/reports/${{ matrix.machines }}_tests_torch_cuda_extensions_gpu/failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
path: reports
name: ${{ matrix.machines }}_run_tests_torch_cuda_extensions_gpu_test_reports
path: /workspace/transformers/reports/${{ matrix.machines }}_tests_torch_cuda_extensions_gpu

send_results:
name: Send results to webhook
runs-on: ubuntu-latest
if: always()
needs: [
run_all_tests_torch_gpu,
run_all_tests_tf_gpu,
run_all_tests_torch_multi_gpu,
run_all_tests_tf_multi_gpu,
run_all_tests_torch_cuda_extensions_gpu,
run_all_tests_torch_cuda_extensions_multi_gpu
]
needs: [setup, run_tests_gpu, run_examples_gpu, run_pipelines_tf_gpu, run_pipelines_torch_gpu, run_all_tests_torch_cuda_extensions_gpu]
steps:
- uses: actions/checkout@v2

- uses: actions/download-artifact@v2

- name: Send message to Slack
env:
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}

CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
run: |
pip install slack_sdk
python utils/notification_service.py scheduled
python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
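The rewrite above replaces the old monolithic per-framework jobs with a `setup` job that lists the test folders and fans them out across a machine matrix. A condensed sketch of that pattern, pieced together from the diff (step names abbreviated; not the exact workflow file):

```yaml
# Condensed, illustrative sketch of the setup/fan-out pattern shown in the diff above.
jobs:
  setup:
    strategy:
      matrix:
        machines: [multi-gpu-docker, single-gpu-docker]
    runs-on: ${{ matrix.machines }}
    container:
      image: huggingface/transformers-all-latest-gpu
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - id: set-matrix
        name: Identify models to test
        working-directory: /transformers/tests
        run: |
          echo "::set-output name=matrix::$(python3 -c 'import os; x = list(filter(os.path.isdir, os.listdir(os.getcwd()))); x.sort(); print(x)')"

  run_tests_gpu:
    name: Model tests
    needs: setup
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machines: [multi-gpu-docker, single-gpu-docker]
    runs-on: ${{ matrix.machines }}
    container:
      image: huggingface/transformers-all-latest-gpu
    steps:
      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machines }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
```

The same machines-by-folders matrix is what the per-folder report names (`${{ matrix.machines }}_tests_gpu_${{ matrix.folders }}`) in the failure-report and artifact steps refer to.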
2  .github/workflows/update_metdata.yml  vendored
@@ -3,7 +3,7 @@ name: Update Transformers metadata

on:
push:
branches:
- master
- main
- update_transformers_metadata

jobs:
5  .gitignore  vendored
@@ -160,4 +160,7 @@ tags

.pre-commit*

# .lock
*.lock
*.lock

# DS_Store (MacOS)
.DS_Store
123  CONTRIBUTING.md
@@ -26,7 +26,7 @@ on the awesome projects it made possible, shout out on Twitter every time it has

helped you, or simply star the repo to say "thank you".

Whichever way you choose to contribute, please be mindful to respect our
[code of conduct](https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md).
[code of conduct](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md).

## You can contribute in so many ways!

@@ -92,7 +92,7 @@ If you are willing to contribute the model yourself, let us know so we can best

guide you.

We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them
in the [`templates`](https://github.com/huggingface/transformers/tree/master/templates) folder.
in the [`templates`](https://github.com/huggingface/transformers/tree/main/templates) folder.

### Do you want a new feature (that is not a model)?

@@ -114,7 +114,7 @@ If your issue is well written we're already 80% of the way there by the time you

post it.

We have added **templates** to guide you in the process of adding a new example script for training or testing the
models in the library. You can find them in the [`templates`](https://github.com/huggingface/transformers/tree/master/templates)
models in the library. You can find them in the [`templates`](https://github.com/huggingface/transformers/tree/main/templates)
folder.

## Start contributing! (Pull Requests)

@@ -124,7 +124,7 @@ issues to make sure that nobody is already working on the same thing. If you are

unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to be able to contribute to
`transformers`. `git` is not the easiest tool to use but it has the greatest
🤗 Transformers. `git` is not the easiest tool to use but it has the greatest
manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.

@@ -148,7 +148,7 @@ Follow these steps to start contributing:

$ git checkout -b a-descriptive-name-for-my-changes
```

**Do not** work on the `master` branch.
**Do not** work on the `main` branch.

4. Set up a development environment by running the following command in a virtual environment:

@@ -175,34 +175,26 @@ Follow these steps to start contributing:

5. Develop the features on your branch.

As you work on the features, you should make sure that the test suite
passes:
passes. You should run the tests impacted by your changes like this:

```bash
$ pytest tests/<TEST_TO_RUN>.py
```

You can also run the full suite with the following command, but it takes
a beefy machine to produce a result in a decent amount of time now that
Transformers has grown a lot. Here is the command for it:

```bash
$ make test
```

Note, that this command uses `-n auto` pytest flag, therefore, it will start as many parallel `pytest` processes as the number of your computer's CPU-cores, and if you have lots of those and a few GPUs and not a great amount of RAM, it's likely to overload your computer. Therefore, to run the test suite, you may want to consider using this command instead:
For more information about tests, check out the
[dedicated documentation](https://huggingface.co/docs/transformers/testing)

```bash
$ python -m pytest -n 3 --dist=loadfile -s -v ./tests/
```

Adjust the value of `-n` to fit the load your hardware can support.

`transformers` relies on `black` and `isort` to format its source code
consistently. After you make changes, format them with:

```bash
$ make style
```

`transformers` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality
control runs in CI, however you can also run the same checks with:

```bash
$ make quality
```
You can do the automatic style corrections and code verifications that can't be automated in one go:
🤗 Transformers relies on `black` and `isort` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:

```bash
$ make fixup

@@ -210,16 +202,55 @@ Follow these steps to start contributing:

This target is also optimized to only work with files modified by the PR you're working on.

If you're modifying documents under `docs/source`, make sure to validate that
they can still be built. This check also runs in CI. To run a local check
make sure you have installed the documentation builder requirements, by
running `pip install .[tf,torch,docs]` once from the root of this repository
and then run:
If you prefer to run the checks one after the other, the following command apply the
style corrections:

```bash
$ make docs
$ make style
```

🤗 Transformers also uses `flake8` and a few custom scripts to check for coding mistakes. Quality
control runs in CI, however you can also run the same checks with:

```bash
$ make quality
```

Finally we have a lot of scripts that check we didn't forget to update
some files when adding a new model, that you can run with

```bash
$ make repo-consistency
```

To learn more about those checks and how to fix any issue with them, check out the
[documentation](https://huggingface.co/docs/transformers/pr_checks)

If you're modifying documents under `docs/source`, make sure to validate that
they can still be built. This check also runs in CI. To run a local check
make sure you have installed the documentation builder requirements. First you will need to clone the
repository containing our tools to build the documentation:

```bash
$ pip install git+https://github.com/huggingface/doc-builder
```

Then, make sure you have all the dependencies to be able to build the doc with:

```bash
$ pip install ".[docs]"
```

Finally run the following command from the root of the repository:

```bash
$ doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build
```

This will build the documentation in the `~/tmp/test-build` folder where you can inspect the generated
Markdown files with your favorite editor. You won't be able to see the final rendering on the website
before your PR is merged, we are actively working on adding a tool for this.

Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:

@@ -236,7 +267,7 @@ Follow these steps to start contributing:

```bash
$ git fetch upstream
$ git rebase upstream/master
$ git rebase upstream/main
```

Push the changes to your account using:

@@ -277,15 +308,17 @@ Follow these steps to start contributing:

example.
7. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL.
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.

See more about the checks run on a pull request in our [PR guide](pr_checks)

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
the [tests folder](https://github.com/huggingface/transformers/tree/master/tests) and examples tests in the
[examples folder](https://github.com/huggingface/transformers/tree/master/examples).
the [tests folder](https://github.com/huggingface/transformers/tree/main/tests) and examples tests in the
[examples folder](https://github.com/huggingface/transformers/tree/main/examples).

We like `pytest` and `pytest-xdist` because it's faster. From the root of the
repository, here's how to run tests with `pytest` for the library:

@@ -331,11 +364,11 @@ $ python -m unittest discover -s examples -t examples -v

### Style guide

For documentation strings, `transformers` follows the [google style](https://google.github.io/styleguide/pyguide.html).
Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/master/docs#writing-documentation---specification)
For documentation strings, 🤗 Transformers follows the [google style](https://google.github.io/styleguide/pyguide.html).
Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification)
for more information.

#### This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)
#### This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md)

### Develop on Windows

@@ -353,15 +386,15 @@ One way one can run the make command on Window is to pass by MSYS2:

You can now use `make` from any terminal (Powershell, cmd.exe, etc) 🎉

### Syncing forked master with upstream (HuggingFace) master
### Syncing forked main with upstream (HuggingFace) main

To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnessary notifications to the developers involved in these PRs,
when syncing the master branch of a forked repository, please, follow these steps:
1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead merge directly into the forked master.
To avoid pinging the upstream repository which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs,
when syncing the main branch of a forked repository, please, follow these steps:
1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead merge directly into the forked main.
2. If a PR is absolutely necessary, use the following steps after checking out your branch:
```
$ git checkout -b your-branch-for-syncing
$ git pull --squash --no-commit upstream master
$ git pull --squash --no-commit upstream main
$ git commit -m '<your message without GitHub references>'
$ git push --set-upstream origin your-branch-for-syncing
```
@@ -71,8 +71,8 @@ You are not required to read the following guidelines before opening an issue. H

File "/transformers/src/transformers/__init__.py", line 34, in <module>
from . import dependency_versions_check
File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module>
from .file_utils import is_tokenizers_available
File "/transformers/src/transformers/file_utils.py", line 40, in <module>
from .utils import is_tokenizers_available
File "/transformers/src/transformers/utils/import_utils.py", line 40, in <module>
from tqdm.auto import tqdm
ModuleNotFoundError: No module named 'tqdm.auto'
```

@@ -124,8 +124,8 @@ You are not required to read the following guidelines before opening an issue. H

File "/transformers/src/transformers/__init__.py", line 34, in <module>
from . import dependency_versions_check
File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module>
from .file_utils import is_tokenizers_available
File "/transformers/src/transformers/file_utils.py", line 40, in <module>
from .utils import is_tokenizers_available
File "/transformers/src/transformers/utils/import_utils.py", line 40, in <module>
from tqdm.auto import tqdm
ModuleNotFoundError: No module named 'tqdm.auto'
```
11  Makefile
@@ -1,4 +1,4 @@

.PHONY: deps_table_update modified_only_fixup extra_quality_checks quality style fixup fix-copies test test-examples docs
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples

# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src

@@ -48,13 +48,13 @@ quality:

isort --check-only $(check_dirs)
python utils/custom_init_isort.py --check_only
flake8 $(check_dirs)
python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source

# Format source code automatically and check is there are any problems left that need manual fixing

extra_style_checks:
python utils/custom_init_isort.py
python utils/style_doc.py src/transformers docs/source --max_len 119
doc-builder style src/transformers docs/source --max_len 119 --path_to_docs docs/source

# this target runs checks on all files and potentially modifies some of them

@@ -91,11 +91,6 @@ test-sagemaker: # install sagemaker dependencies in advance with pip install .[s

TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker

# Check that docs can build

docs:
cd docs && make html SPHINXOPTS="-W -j 4"

# Release stuff

pre-release:
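The Makefile hunks above swap the old `utils/style_doc.py` check for `doc-builder style`. Run by hand, the equivalent commands would look roughly like this (assuming `doc-builder` has been installed from the repository referenced in the CONTRIBUTING.md diff):

```bash
# Sketch only - mirrors the new `quality` and `extra_style_checks` targets shown above.
pip install git+https://github.com/huggingface/doc-builder
# check-only, as in the `quality` target:
doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
# apply the corrections, as in `extra_style_checks`:
doc-builder style src/transformers docs/source --max_len 119 --path_to_docs docs/source
```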
80  README.md
@ -16,14 +16,14 @@ limitations under the License.
|
||||
|
||||
<p align="center">
|
||||
<br>
|
||||
<img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/transformers_logo_name.png" width="400"/>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
|
||||
<br>
|
||||
<p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
@ -32,7 +32,7 @@ limitations under the License.
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md">
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
@ -41,9 +41,9 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<b>English</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/README_ko.md">한국어</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a>
|
||||
<p>
|
||||
</h4>
|
||||
|
||||
@ -52,7 +52,7 @@ limitations under the License.
|
||||
</h3>
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/course_banner.png"></a>
|
||||
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
|
||||
</h3>
|
||||
|
||||
🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.
|
||||
@ -185,7 +185,7 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta
|
||||
|
||||
- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
|
||||
- The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library.
|
||||
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.
|
||||
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.
|
||||
|
||||
## Installation
|
||||
|
||||
@ -198,7 +198,7 @@ You should install 🤗 Transformers in a [virtual environment](https://docs.pyt
|
||||
First, create a virtual environment with the version of Python you're going to use and activate it.
|
||||
|
||||
Then, you will need to install at least one of Flax, PyTorch or TensorFlow.
|
||||
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax installation page](https://github.com/google/flax#quick-install) regarding the specific install command for your platform.
|
||||
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages regarding the specific install command for your platform.
|
||||
|
||||
When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:
|
||||
|
||||
@ -229,46 +229,52 @@ Current number of checkpoints:  for a high-level summary of each them):
|
||||
|
||||
1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
|
||||
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
||||
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
||||
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
|
||||
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei.
|
||||
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
|
||||
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bertgeneration)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/bigbird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot_small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
|
||||
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
|
||||
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
||||
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
|
||||
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
||||
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
|
||||
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
|
||||
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
|
||||
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
|
||||
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
@@ -279,21 +285,27 @@ Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
|
||||
1. **[MBart](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[MBart-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
|
||||
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
||||
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
|
||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
|
||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
|
||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
|
||||
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
|
||||
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
|
||||
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
|
||||
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
|
||||
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
|
||||
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
|
||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
|
||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
|
||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
|
||||
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
|
||||
@@ -301,22 +313,32 @@ Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
|
||||
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
|
||||
1. **[SqueezeBert](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
|
||||
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
|
||||
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
||||
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
||||
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
|
||||
1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedback before starting your PR.
|
||||
|
||||
To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).
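As a rough illustration of what that table encodes, most checkpoints can be loaded through the framework-specific auto classes. The snippet below is only a minimal sketch: it assumes `transformers` is installed alongside both PyTorch and TensorFlow, and `bert-base-uncased` is just an example checkpoint that happens to ship weights for both frameworks.

```python
# Minimal sketch (not from the table above): loading one example checkpoint
# from both the PyTorch and the TensorFlow side, plus its fast tokenizer.
from transformers import AutoTokenizer, AutoModel, TFAutoModel

checkpoint = "bert-base-uncased"  # example checkpoint with PyTorch and TensorFlow weights

tokenizer = AutoTokenizer.from_pretrained(checkpoint)  # backed by the 🤗 Tokenizers library
pt_model = AutoModel.from_pretrained(checkpoint)       # PyTorch implementation
tf_model = TFAutoModel.from_pretrained(checkpoint)     # TensorFlow implementation

pt_inputs = tokenizer("Hello world!", return_tensors="pt")
print(pt_model(**pt_inputs).last_hidden_state.shape)   # e.g. torch.Size([1, 5, 768])
```
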
@@ -330,9 +352,9 @@ These implementations have been tested on several datasets (see the example scri
|
||||
|-|-|
|
||||
| [Documentation](https://huggingface.co/docs/transformers/) | Full API documentation and tutorials |
|
||||
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
|
||||
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
|
||||
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and the `Trainer` API (a minimal sketch follows this table) |
|
||||
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
|
||||
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
|
||||
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrate to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |
README_ko.md
@@ -16,14 +16,14 @@ limitations under the License.
|
||||
|
||||
<p align="center">
|
||||
<br>
|
||||
<img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/transformers_logo_name.png" width="400"/>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
|
||||
<br>
|
||||
<p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
@@ -32,7 +32,7 @@ limitations under the License.
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md">
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
@@ -41,8 +41,8 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/master/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<b>한국어</b>
|
||||
<p>
|
||||
</h4>
|
||||
@@ -52,7 +52,7 @@ limitations under the License.
|
||||
</h3>
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/course_banner.png"></a>
|
||||
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
|
||||
</h3>
|
||||
|
||||
🤗 Transformers provides thousands of pretrained models that can perform tasks such as classification, information extraction, question answering, summarization, translation and text generation in over 100 languages. Our goal is to make state-of-the-art NLP easy for everyone to use.
|
||||
@@ -166,7 +166,7 @@ limitations under the License.
|
||||
|
||||
- This library is not a modular toolbox of building blocks for neural networks. The code in the model files is intentionally kept at a moderate level of abstraction so that researchers can work with each model directly without digging through several files.
|
||||
- The training API is not meant to work on any arbitrary model; it is optimized for the models provided by the library. For generic machine learning loops, please use another library.
|
||||
- We would like to show as many usage examples as possible, so we provide the scripts in the [examples folder](https://github.com/huggingface/transformers/tree/main/examples). These scripts may not work on your specific problem without modification; you may need to adapt some of the code to your needs.
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -215,39 +215,45 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
|
||||
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei.
|
||||
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
||||
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
|
||||
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/main/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
1. **[Data2Vec](https://huggingface.co/docs/transformers/main/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
|
||||
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
||||
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.
|
||||
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
1. **[GLPN](https://huggingface.co/docs/transformers/main/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
||||
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
|
||||
1. **[ImageGPT](https://huggingface.co/docs/transformers/main/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
|
||||
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
|
||||
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
|
||||
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
@@ -257,20 +263,26 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
|
||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MaskFormer](https://huggingface.co/docs/transformers/main/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[MBart](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[MBart-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron_bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/main/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[PLBart](https://huggingface.co/docs/transformers/main/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/main/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/main/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
@@ -280,21 +292,31 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBert](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/main/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transformerxl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech_sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[VAN](https://huggingface.co/docs/transformers/main/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[ViLT](https://huggingface.co/docs/transformers/main/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViTMAE](https://huggingface.co/docs/transformers/main/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/main/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlmprophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlmroberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
1. **[YOSO](https://huggingface.co/docs/transformers/main/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
1. Want to contribute a new model? We provide a **detailed guide and templates** to help you add one; you can find them in the [`templates`](./templates) folder of this repository. Be sure to read the [contributing guidelines](./CONTRIBUTING.md) and to contact the maintainers or open an issue to collect feedback before starting your PR.
To check whether each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).
@@ -309,7 +331,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and with the `Trainer` API (see the sketch after this table) |
| [Quick tour: fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/master/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Quick tour: fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrating to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |
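The preprocessing and training/fine-tuning rows above are the core workflow. As a minimal, illustrative sketch — not an excerpt from the linked tutorials; the checkpoint name, the two-sentence toy dataset and the hyper-parameters are assumptions — tokenizing data and handing it to `Trainer` looks roughly like this:

```python
# Hedged sketch: tokenize a toy dataset and fine-tune with the Trainer API.
import torch
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

texts, labels = ["a delightful read", "not worth the time"], [1, 0]
encodings = tokenizer(texts, truncation=True, padding=True)

class ToyDataset(torch.utils.data.Dataset):
    """Wraps the tokenizer output so Trainer can iterate over it."""
    def __init__(self, encodings, labels):
        self.encodings, self.labels = encodings, labels
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        item["labels"] = torch.tensor(self.labels[idx])
        return item

training_args = TrainingArguments(output_dir="out", num_train_epochs=1, per_device_train_batch_size=2)
trainer = Trainer(model=model, args=training_args, train_dataset=ToyDataset(encodings, labels))
trainer.train()
```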
@@ -41,14 +41,14 @@ checkpoint: 检查点
<p align="center">
<br>
<img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/transformers_logo_name.png" width="400"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
<br>
<p>
<p align="center">
<a href="https://circleci.com/gh/huggingface/transformers">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
</a>
<a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
</a>
<a href="https://huggingface.co/docs/transformers/index">
@@ -57,7 +57,7 @@ checkpoint: 检查点
<a href="https://github.com/huggingface/transformers/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
</a>
<a href="https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md">
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
</a>
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
@@ -67,8 +67,8 @@ checkpoint: 检查点
<p>
<a href="https://github.com/huggingface/transformers/">English</a> |
<b>简体中文</b> |
<a href="https://github.com/huggingface/transformers/blob/master/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/master/README_ko.md">한국어</a>
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a>
<p>
</h4>
@@ -77,7 +77,7 @@ checkpoint: 检查点
</h3>
<h3 align="center">
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/course_banner.png"></a>
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>
🤗 Transformers provides thousands of pretrained models for text classification, information extraction, question answering, summarization, translation and text generation in more than 100 languages. Its aim is to put state-of-the-art NLP within everyone's reach.
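To make that claim concrete — a hedged sketch rather than an excerpt from this README; the task name and example sentence are arbitrary, and a default checkpoint is downloaded on first use — the high-level `pipeline` API exposes a pretrained model behind a single call:

```python
from transformers import pipeline

# Downloads a default sentiment-analysis checkpoint on first use.
classifier = pipeline("sentiment-analysis")
print(classifier("This library makes state-of-the-art NLP remarkably easy to use."))
# e.g. [{'label': 'POSITIVE', 'score': 0.99...}]
```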
@@ -191,7 +191,7 @@ checkpoint: 检查点
- This library is not a modular toolbox of neural-network building blocks. The code in the model files is deliberately left plain, without extra layers of abstraction, so that researchers can quickly iterate on and modify each model instead of getting lost in abstractions and file hops.
- The `Trainer` API is not compatible with every model; it is optimized only for the models in this library. If you are looking for a training-loop implementation for generic machine learning, please look elsewhere (a sketch of rolling your own loop follows this list).
- Although we do our best, the scripts in the [examples directory](https://github.com/huggingface/transformers/tree/master/examples) are only examples. They will not necessarily work out of the box on your specific problem, and you may need to change a few lines of code to adapt them.
- Although we do our best, the scripts in the [examples directory](https://github.com/huggingface/transformers/tree/main/examples) are only examples. They will not necessarily work out of the box on your specific problem, and you may need to change a few lines of code to adapt them.
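To make the `Trainer` caveat concrete — a minimal sketch under assumed names (the checkpoint and the toy batch are illustrative, not taken from the library docs) — every model is an ordinary `torch.nn.Module`, so you can always skip `Trainer` and drive the model from your own loop:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

batch = tokenizer(["great movie", "terrible movie"], padding=True, return_tensors="pt")
labels = torch.tensor([1, 0])

model.train()
outputs = model(**batch, labels=labels)  # the model returns a loss when labels are provided
outputs.loss.backward()
optimizer.step()
optimizer.zero_grad()
```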
## Installation
@@ -239,39 +245,45 @@ conda install -c huggingface transformers
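After installing (with pip or with the conda command shown in the hunk above), a quick sanity check — illustrative only; any recent 4.x release prints a similar string — confirms the package is importable:

```python
import transformers

print(transformers.__version__)  # e.g. 4.18.0
```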
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (来自 VinAI Research) 伴随论文 [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) 由 Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen 发布。
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (来自 Microsoft) 伴随论文 [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) 由 Hangbo Bao, Li Dong, Furu Wei 发布。
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (来自 Google) 伴随论文 [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) 由 Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova 发布。
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bertgeneration)** (来自 Google) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (来自 Google) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (来自 VinAI Research) 伴随论文 [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) 由 Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen 发布。
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (来自 Google Research) 伴随论文 [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) 由 Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed 发布。
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/bigbird)** (来自 Google Research) 伴随论文 [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) 由 Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed 发布。
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (来自 Google Research) 伴随论文 [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) 由 Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed 发布。
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot_small)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (来自 Alexa) 伴随论文 [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) 由 Adrian de Wynter and Daniel J. Perry 发布。
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (来自 Google Research) 伴随论文 [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) 由 Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel 发布。
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (来自 Inria/Facebook/Sorbonne) 伴随论文 [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) 由 Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot 发布。
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (来自 Google Research) 伴随论文 [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) 由 Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting 发布。
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (来自 YituTech) 伴随论文 [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) 由 Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan 发布。
1. **[ConvNeXT](https://huggingface.co/docs/transformers/main/model_doc/convnext)** (来自 Facebook AI) 伴随论文 [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) 由 Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie 发布。
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (来自 Tsinghua University) 伴随论文 [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) 由 Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun 发布。
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (来自 Salesforce) 伴随论文 [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) 由 Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher 发布。
1. **[Data2Vec](https://huggingface.co/docs/transformers/main/model_doc/data2vec)** (来自 Facebook) 伴随论文 [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) 由 Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli 发布。
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta_v2)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (来自 Berkeley/Facebook/Google) 伴随论文 [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) 由 Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch 发布。
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (来自 Facebook) 伴随论文 [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) 由 Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou 发布。
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (来自 Facebook) 伴随论文 [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) 由 Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko 发布。
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (来自 Microsoft Research) 伴随论文 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 由 Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 发布。
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (来自 HuggingFace), 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 同样的方法也应用于压缩 GPT-2 到 [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa 到 [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT 到 [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) 和德语版 DistilBERT。
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (来自 HuggingFace), 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 同样的方法也应用于压缩 GPT-2 到 [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa 到 [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT 到 [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) 和德语版 DistilBERT。
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (来自 Microsoft Research) 伴随论文 [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 由 Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei 发布。
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (来自 Facebook) 伴随论文 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 由 Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 发布。
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoderdecoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (来自 CMU/Google Brain) 伴随论文 [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) 由 Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le 发布。
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/gpt)** (来自 OpenAI) 伴随论文 [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) 由 Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever 发布。
1. **[GLPN](https://huggingface.co/docs/transformers/main/model_doc/glpn)** (来自 KAIST) 伴随论文 [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) 由 Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim 发布。
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (来自 OpenAI) 伴随论文 [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) 由 Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever 发布。
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (来自 EleutherAI) 随仓库 [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) 发布。作者为 Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy 发布。
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (来自 OpenAI) 伴随论文 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 由 Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 发布。
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (来自 EleutherAI) 伴随论文 [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) 由 Ben Wang and Aran Komatsuzaki 发布。
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (来自 Facebook) 伴随论文 [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) 由 Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed 发布。
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (来自 Berkeley) 伴随论文 [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) 由 Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer 发布。
1. **[ImageGPT](https://huggingface.co/docs/transformers/master/model_doc/imagegpt)** (来自 OpenAI) 伴随论文 [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) 由 Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever 发布。
1. **[ImageGPT](https://huggingface.co/docs/transformers/main/model_doc/imagegpt)** (来自 OpenAI) 伴随论文 [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) 由 Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever 发布。
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 由 Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou 发布。
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 由 Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou 发布。
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (来自 Microsoft Research Asia) 伴随论文 [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) 由 Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei 发布。
@@ -281,20 +287,26 @@ conda install -c huggingface transformers
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (来自 UNC Chapel Hill) 伴随论文 [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) 由 Hao Tan and Mohit Bansal 发布。
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (来自 Facebook) 伴随论文 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 由 Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 发布。
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** 用 [OPUS](http://opus.nlpl.eu/) 数据训练的机器翻译模型由 Jörg Tiedemann 发布。[Marian Framework](https://marian-nmt.github.io/) 由微软翻译团队开发。
1. **[MaskFormer](https://huggingface.co/docs/transformers/main/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov
1. **[MBart](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) 由 Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer 发布。
1. **[MBart-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) 由 Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan 发布。
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron_bert)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (来自 Studio Ousia) 伴随论文 [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) 由 Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka 发布。
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (来自 Microsoft Research) 伴随论文 [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) 由 Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu 发布。
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (来自 Google AI) 伴随论文 [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 由 Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel 发布。
1. **[Nyströmformer](https://huggingface.co/docs/transformers/main/model_doc/nystromformer)** (来自 the University of Wisconsin - Madison) 伴随论文 [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 由 Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh 发布。
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。
1. **[PLBart](https://huggingface.co/docs/transformers/main/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。
1. **[PoolFormer](https://huggingface.co/docs/transformers/main/model_doc/poolformer)** (来自 Sea AI Labs) 伴随论文 [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) 由 Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng 发布。
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (来自 NVIDIA) 伴随论文 [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) 由 Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius 发布。
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (来自 Google Research) 伴随论文 [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) 由 Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang 发布。
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (来自 Google Research) 伴随论文 [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) 由 Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya 发布。
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (来自 Google Research) 伴随论文 [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) 由 Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder 发布。
1. **[ResNet](https://huggingface.co/docs/transformers/main/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (来自 Facebook), 伴随论文 [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) 由 Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov 发布。
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (来自 ZhuiyiTechnology), 伴随论文 [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 由 Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 发布。
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (来自 NVIDIA) 伴随论文 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 由 Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 发布。
@@ -304,21 +316,31 @@ conda install -c huggingface transformers
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (来自 Facebook) 伴随论文 [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) 由 Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau 发布。
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (来自 Tel Aviv University) 伴随论文 [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) 由 Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy 发布。
1. **[SqueezeBert](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (来自 Berkeley) 伴随论文 [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) 由 Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer 发布。
1. **[Swin Transformer](https://huggingface.co/docs/transformers/main/model_doc/swin)** (来自 Microsoft) 伴随论文 [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) 由 Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo 发布。
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (来自 Google AI) 伴随论文 [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transformerxl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech_sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。
1. **[VAN](https://huggingface.co/docs/transformers/main/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。
1. **[ViLT](https://huggingface.co/docs/transformers/main/model_doc/vilt)** (来自 NAVER AI Lab/Kakao Enterprise/Kakao Brain) 伴随论文 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 由 Wonjae Kim, Bokyung Son, Ildoo Kim 发布。
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。
1. **[ViTMAE](https://huggingface.co/docs/transformers/main/model_doc/vit_mae)** (来自 Meta AI) 伴随论文 [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) 由 Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick 发布。
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (来自 Facebook AI) 伴随论文 [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) 由 Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli 发布。
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。
1. **[WavLM](https://huggingface.co/docs/transformers/main/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlmprophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlmroberta)** (来自 Facebook AI), 伴随论文 [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) 由 Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov 发布。
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (来自 Facebook AI), 伴随论文 [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) 由 Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov 发布。
|
||||
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (来自 Facebook AI) 伴随论文 [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) 由 Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau 发布。
|
||||
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (来自 Google/CMU) 伴随论文 [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) 由 Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le 发布。
|
||||
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (来自 Facebook AI) 伴随论文 [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) 由 Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli 发布。
|
||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (来自 Facebook AI) 伴随论文 [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) 由 Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli 发布。
|
||||
1. **[YOSO](https://huggingface.co/docs/transformers/main/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
1. Want to contribute a new model? We have a **detailed guide and templates** to walk you through adding a new model. You can find them in the [`templates`](./templates) directory. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and to contact the maintainers or open a new issue to collect feedback before starting your PR.

To check whether a model already has a Flax, PyTorch or TensorFlow implementation, or whether it has an associated tokenizer in the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).
@@ -334,7 +356,7 @@ conda install -c huggingface transformers

| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop or with the `Trainer` API |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/master/examples) | Example scripts for a wide range of tasks |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrating to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |

@@ -53,14 +53,14 @@ user: 使用者

<p align="center">
<br>
<img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/transformers_logo_name.png" width="400"/>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
<br>
<p>
<p align="center">
<a href="https://circleci.com/gh/huggingface/transformers">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
</a>
<a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
</a>
<a href="https://huggingface.co/docs/transformers/index">
@@ -69,7 +69,7 @@ user: 使用者
<a href="https://github.com/huggingface/transformers/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
</a>
<a href="https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md">
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
</a>
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
@@ -78,9 +78,9 @@ user: 使用者

<h4 align="center">
<p>
<a href="https://github.com/huggingface/transformers/">English</a> |
<a href="https://github.com/huggingface/transformers/blob/master/README_zh-hans.md">简体中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
<b>繁體中文</b> |
<a href="https://github.com/huggingface/transformers/blob/master/README_ko.md">한국어</a>
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a>
<p>
</h4>

@@ -89,7 +89,7 @@ user: 使用者
</h3>

<h3 align="center">
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/course_banner.png"></a>
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>

🤗 Transformers provides thousands of pretrained models to perform text classification, information extraction, question answering, summarization, translation and text generation in over 100 languages. Its aim is to make state-of-the-art NLP easy to use for everyone.

@@ -203,7 +203,7 @@ The tokenizer provides preprocessing for every pretrained model and can directly convert

- This library is not a modular toolbox of building blocks for neural networks. The code in the model files is deliberately kept free of extra abstractions so that researchers can quickly read and modify the code without getting lost in complex layers of class wrappers.
- The `Trainer` API is not compatible with just any model; it is optimized only for the models provided by this library. For generic machine learning use cases, please use another library.
- Although we do our best, the scripts in the [examples directory](https://github.com/huggingface/transformers/tree/master/examples) are only examples. They will not necessarily work out of the box on your specific problem, and you may need to change a few lines of code to adapt them to your needs.
- Although we do our best, the scripts in the [examples directory](https://github.com/huggingface/transformers/tree/main/examples) are only examples. They will not necessarily work out of the box on your specific problem, and you may need to change a few lines of code to adapt them to your needs.

## Installation

@@ -251,39 +251,45 @@ conda install -c huggingface transformers
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei.
|
||||
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bertgeneration)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
||||
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/bigbird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot_small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
|
||||
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/main/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
1. **[Data2Vec](https://huggingface.co/docs/transformers/main/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
|
||||
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta_v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
||||
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.
|
||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.
|
||||
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoderdecoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GLPN](https://huggingface.co/docs/transformers/main/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released with the paper [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
||||
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
|
||||
1. **[ImageGPT](https://huggingface.co/docs/transformers/master/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
|
||||
1. **[ImageGPT](https://huggingface.co/docs/transformers/main/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
|
||||
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
|
||||
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
|
||||
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
@@ -293,20 +299,26 @@ conda install -c huggingface transformers
|
||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/main/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov
|
||||
1. **[MBart](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[MBart-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron_bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
|
||||
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
|
||||
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
||||
1. **[Nyströmformer](https://huggingface.co/docs/transformers/main/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
|
||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
|
||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
|
||||
1. **[PLBart](https://huggingface.co/docs/transformers/main/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
|
||||
1. **[PoolFormer](https://huggingface.co/docs/transformers/main/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
|
||||
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
|
||||
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
|
||||
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
|
||||
1. **[ResNet](https://huggingface.co/docs/transformers/main/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
|
||||
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
|
||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper a [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
|
||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
|
||||
@@ -316,21 +328,31 @@ conda install -c huggingface transformers
|
||||
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook) released with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
|
||||
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University) released with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
|
||||
1. **[SqueezeBert](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
|
||||
1. **[Swin Transformer](https://huggingface.co/docs/transformers/main/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
|
||||
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transformerxl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech_sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](https://huggingface.co/docs/transformers/main/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
1. **[ViLT](https://huggingface.co/docs/transformers/main/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/main/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/main/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlmprophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlmroberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI) released with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
||||
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[YOSO](https://huggingface.co/docs/transformers/main/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
1. Want to contribute a new model? We have a **detailed guide and templates** to walk you through adding a new model. You can find them in the [`templates`](./templates) directory. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and to contact the maintainers or open a new issue to collect feedback before starting your PR.

To check whether a model already has a Flax, PyTorch or TensorFlow implementation, or whether it has an associated tokenizer in the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).

@@ -346,7 +368,7 @@ conda install -c huggingface transformers

| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop or with the `Trainer` API |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/master/examples) | Example scripts for a wide range of tasks |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrating to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |

@@ -15,6 +15,7 @@

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join
@@ -22,7 +23,7 @@ from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
@@ -59,3 +60,19 @@ def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
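With the checker patched this way, any doctest collected during the pytest session can mark its expected output as "don't care" via the registered flag. The snippet below is a minimal, hypothetical illustration (the function and the sample output are made up purely to show the directive):

```python
import time


def current_timestamp():
    """
    Return the current Unix timestamp.

    >>> current_timestamp()  # doctest: +IGNORE_RESULT
    1650000000.0
    """
    return time.time()
```

Without `# doctest: +IGNORE_RESULT` this example would fail on every run, since the returned value keeps changing; with the flag set, the patched `OutputChecker` accepts whatever the call produces.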
22 docker/transformers-all-latest-gpu/Dockerfile Normal file
@@ -0,0 +1,22 @@
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime]

RUN python3 -m pip install --no-cache-dir -U torch tensorflow
RUN python3 -m pip uninstall -y flax jax
RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+cu102.html
RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip
RUN python3 -m pip install -U "itsdangerous<2.1.0"

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
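A rough sketch of how this image might be built and exercised locally (the tag name below is an arbitrary choice, and GPU access assumes the NVIDIA container toolkit is installed on the host):

```bash
# Build from a checkout of the repository; REF defaults to `main` and can be pinned to a release tag.
docker build -f docker/transformers-all-latest-gpu/Dockerfile \
    --build-arg REF=v4.18.0 -t transformers-all-latest-gpu .

# Smoke-test the installation inside the container.
docker run --rm --gpus all transformers-all-latest-gpu \
    python3 -c "import torch, transformers; print(transformers.__version__, torch.cuda.is_available())"
```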
19 docker/transformers-doc-builder/Dockerfile Normal file
@@ -0,0 +1,19 @@
FROM python:3.8
LABEL maintainer="Hugging Face"

RUN apt update
RUN git clone https://github.com/huggingface/transformers

RUN python3 -m pip install --no-cache-dir --upgrade pip && python3 -m pip install --no-cache-dir git+https://github.com/huggingface/doc-builder ./transformers[dev]
RUN apt-get -y update && apt-get install -y libsndfile1-dev && apt install -y tesseract-ocr

# Torch needs to be installed before deepspeed
RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed]

RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python -c "from torch import version; print(version.__version__.split('+')[0])")+cpu.html
RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip
RUN python3 -m pip install --no-cache-dir pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com
RUN python3 -m pip install -U "itsdangerous<2.1.0"

RUN doc-builder build transformers transformers/docs/source --build_dir doc-build-dev --notebook_dir notebooks/transformers_doc --clean --version pr_$PR_NUMBER
RUN rm -rf doc-build-dev
21 docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile Normal file
@@ -0,0 +1,21 @@
FROM nvcr.io/nvidia/pytorch:21.03-py3
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

RUN apt -y update
RUN apt install -y libaio-dev
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip install --no-cache-dir -e ./transformers[testing,deepspeed]

RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
    DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

RUN python3 -c "from deepspeed.launcher.runner import main"
@@ -1,30 +1,26 @@
FROM nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="transformers"

RUN apt update && \
    apt install -y bash \
        build-essential \
        git \
        curl \
        ca-certificates \
        python3 \
        python3-pip && \
    rm -rf /var/lib/apt/lists
ARG DEBIAN_FRONTEND=noninteractive

RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        mkl \
        torch
RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
RUN python3 -m pip install --no-cache-dir --upgrade pip

RUN git clone https://github.com/NVIDIA/apex
RUN cd apex && \
    python3 setup.py install && \
    pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing]

WORKDIR /workspace
COPY . transformers/
RUN cd transformers/ && \
    python3 -m pip install --no-cache-dir .
# If set to nothing, will install the latest version
ARG PYTORCH=''

CMD ["/bin/bash"]
RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION
RUN python3 -m pip uninstall -y tensorflow flax

RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+cu102.html
RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip
RUN python3 -m pip install -U "itsdangerous<2.1.0"

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

@@ -1,7 +1,7 @@
FROM google/cloud-sdk:slim

# Build args.
ARG GITHUB_REF=refs/heads/master
ARG GITHUB_REF=refs/heads/main

# TODO: This Dockerfile installs pytorch/xla 3.6 wheels. There are also 3.7
# wheels available; see below.

@@ -1,25 +1,23 @@
FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="transformers"

RUN apt update && \
    apt install -y bash \
        build-essential \
        git \
        curl \
        ca-certificates \
        python3 \
        python3-pip && \
    rm -rf /var/lib/apt/lists
ARG DEBIAN_FRONTEND=noninteractive

RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        mkl \
        tensorflow
RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
RUN python3 -m pip install --no-cache-dir --upgrade pip

WORKDIR /workspace
COPY . transformers/
RUN cd transformers/ && \
    python3 -m pip install --no-cache-dir .
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing]

CMD ["/bin/bash"]
# If set to nothing, will install the latest version
ARG TENSORFLOW=''

RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION
RUN python3 -m pip uninstall -y torch flax
RUN python3 -m pip install -U "itsdangerous<2.1.0"

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

@@ -1,19 +0,0 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SOURCEDIR     = source
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
375  docs/README.md
@@ -23,6 +23,12 @@ you can install them with the following command, at the root of the code repository:
pip install -e ".[docs]"
```

Then you need to install our special tool that builds the documentation:

```bash
pip install git+https://github.com/huggingface/doc-builder
```

---
**NOTE**

@@ -31,88 +37,72 @@ check how they look like before committing for instance). You don't have to commit

---

## Packages installed

Here's an overview of all the packages installed. If you ran the previous command installing all packages from
`requirements.txt`, you do not need to run the following commands.

Building it requires the package `sphinx` that you can
install using:

```bash
pip install -U sphinx
```

You would also need the custom installed [theme](https://github.com/readthedocs/sphinx_rtd_theme) by
[Read The Docs](https://readthedocs.org/). You can install it using the following command:

```bash
pip install sphinx_rtd_theme
```

The third necessary package is the `recommonmark` package to accept Markdown as well as Restructured text:

```bash
pip install recommonmark
```

## Building the documentation

Once you have set up `sphinx`, you can build the documentation by running the following command in the `/docs` folder:
Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
typing the following command:

```bash
make html
doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build
```

A folder called ``_build/html`` should have been created. You can now open the file ``_build/html/index.html`` in your
browser.
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.

---
**NOTE**

If you are adding/removing elements from the toc-tree or from any structural item, it is recommended to clean the build
directory before rebuilding. Run the following command to clean and build:

```bash
make clean && make html
```
For now, it's not possible to see locally what the final documentation will look like. Once you have opened a PR, you
will see a bot add a comment with a link to where the documentation with your changes lives.

---

It should build the static app that will be available under `/docs/_build/html`
## Adding a new element to the navigation bar

## Adding a new element to the tree (toc-tree)
Accepted files are Markdown (.md or .mdx).

Accepted files are reStructuredText (.rst) and Markdown (.md). Create a file with its extension and put it
in the source directory. You can then link it to the toc-tree by putting the filename without the extension.
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/transformers/blob/main/docs/source/_toctree.yml) file.

## Preview the documentation in a pull request
## Renaming section headers and moving sections

Once you have made your pull request, you can check what the documentation will look like after it's merged by
following these steps:
It helps to keep the old links working when renaming section headers and/or moving sections from one document to another. This is because the old links are likely to be used in issues, forums, and social media, and it makes for a much better user experience if users reading them months later can still easily navigate to the originally intended information.

Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.

So if you renamed a section from "Section A" to "Section B", then you can add at the end of the file:

```
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course, if you moved it to another file, then:

```
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich set of moved sections, please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/main_classes/trainer.mdx).

- Look at the checks at the bottom of the conversation page of your PR (you may need to click on "show all checks" to
  expand them).
- Click on "details" next to the `ci/circleci: build_doc` check.
- In the new window, click on the "Artifacts" tab.
- Locate the file "docs/_build/html/index.html" (or any specific page you want to check) and click on it to get a
  preview.

## Writing Documentation - Specification

The `huggingface/transformers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style. It is
mostly written in ReStructuredText
([Sphinx simple documentation](https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html),
[Sourceforge complete documentation](https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html)).

[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
- Link that file in `./source/index.rst` on the correct toc-tree.
- Link that file in `./source/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
depending on the intended targets (beginners, more advanced users or researchers) it should go in section two, three or
@@ -122,8 +112,8 @@ four.

When adding a new model:

- Create a file `xxx.rst` under `./source/model_doc` (don't hesitate to copy an existing file as template).
- Link that file in `./source/index.rst` on the `model_doc` toc-tree.
- Create a file `xxx.mdx` under `./source/model_doc` (don't hesitate to copy an existing file as template).
- Link that file in `./source/_toctree.yml`.
- Write a short overview of the model:
  - Overview with paper & authors
  - Paper abstract
@@ -137,64 +127,82 @@ When adding a new model:
  - PyTorch head models
  - TensorFlow base model
  - TensorFlow head models
  - Flax base model
  - Flax head models

These classes should be added using our Markdown syntax. Usually as follows:

These classes should be added using the RST syntax. Usually as follows:
```
XXXConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## XXXConfig

.. autoclass:: transformers.XXXConfig
    :members:
[[autodoc]] XXXConfig
```

This will include every public method of the configuration that is documented. If for some reason you wish for a method
not to be displayed in the documentation, you can do so by specifying which methods should be in the docs:

```
XXXTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## XXXTokenizer

.. autoclass:: transformers.XXXTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary
[[autodoc]] XXXTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary
```

If you just want to add a method that is not documented (for instance, magic methods like `__call__` are not documented
by default) you can put the list of methods to add in a list that contains `all`:

```
## XXXTokenizer

[[autodoc]] XXXTokenizer
    - all
    - __call__
```

### Writing source documentation

Values that should be put in `code` should either be surrounded by double backticks: \`\`like so\`\` or be written as
an object using the :obj: syntax: :obj:\`like so\`. Note that argument names and objects like True, None or any strings
should usually be put in `code`.
Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None or any strings should usually be put in `code`.

When mentioning a class, it is recommended to use the :class: syntax as the mentioned class will be automatically
linked by Sphinx: :class:\`~transformers.XXXClass\`
When mentioning a class, function or method, it is recommended to use our syntax for internal links so that our tool
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
function to be in the main package.

When mentioning a function, it is recommended to use the :func: syntax as the mentioned function will be automatically
linked by Sphinx: :func:\`~transformers.function\`.
If you want to create a link to some internal class or function, you need to
provide its path. For instance: \[\`utils.ModelOutput\`\]. This will be converted into a link with
`utils.ModelOutput` in the description. To get rid of the path and only keep the name of the object you are
linking to in the description, add a ~: \[\`~utils.ModelOutput\`\] will generate a link with `ModelOutput` in the description.

When mentioning a method, it is recommended to use the :meth: syntax as the mentioned method will be automatically
linked by Sphinx: :meth:\`~transformers.XXXClass.method\`.

Links should be done as so (note the double underscore at the end): \`text for the link <./local-link-or-global-link#loc>\`__
The same works for methods, so you can either use \[\`XXXClass.method\`\] or \[~\`XXXClass.method\`\].

#### Defining arguments in a method

Arguments should be defined with the `Args:` prefix, followed by a line return and an indentation.
The argument should be followed by its type, with its shape if it is a tensor, and a line return.
Another indentation is necessary before writing the description of the argument.
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon and its
description:

```
Args:
    n_layers (`int`): The number of layers of the model.
```

If the description is too long to fit in one line, another indentation is necessary before writing the description
after the argument.

Here's an example showcasing everything so far:

```
Args:
    input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
    input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
        Indices of input sequence tokens in the vocabulary.

        Indices can be obtained using :class:`~transformers.AlbertTokenizer`.
        See :meth:`~transformers.PreTrainedTokenizer.encode` and
        :meth:`~transformers.PreTrainedTokenizer.__call__` for details.
        Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
        [`~PreTrainedTokenizer.__call__`] for details.

        `What are input IDs? <../glossary.html#input-ids>`__
        [What are input IDs?](../glossary#input-ids)
```

For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
@@ -208,93 +216,190 @@ then its documentation should look like this:

```
Args:
    x (:obj:`str`, `optional`):
    x (`str`, *optional*):
        This argument controls ...
    a (:obj:`float`, `optional`, defaults to 1):
    a (`float`, *optional*, defaults to 1):
        This argument is used to ...
```

Note that we always omit the "defaults to :obj:\`None\`" when None is the default for any argument. Also note that even
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done like so:
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:

````
```
Example::

    # first line of code
    # second line
    # etc
# first line of code
# second line
# etc
```

The `Example` string at the beginning can be replaced by anything as long as there are two colons following it.
````

We follow the [doctest](https://docs.python.org/3/library/doctest.html) syntax for the examples to automatically test
the results stay consistent with the library.

#### Writing a return block

Arguments should be defined with the `Args:` prefix, followed by a line return and an indentation.
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.

Here's an example for tuple return, comprising several objects:

```
Returns:
    :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
    loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
        Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
    prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

Here's an example for a single value return:

```
Returns:
    :obj:`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
    `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```

#### Adding a new section

In ReST section headers are designated as such with the help of a line of underlying characters, e.g.,:
Here's an example for tuple return, comprising several objects:

```
Section 1
^^^^^^^^^^^^^^^^^^

Sub-section 1
~~~~~~~~~~~~~~~~~~
Returns:
    `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
    - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
      Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
    - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
      Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

ReST allows the use of any characters to designate different section levels, as long as they are used consistently within the same document. For details see [sections doc](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections). Because there is no standard, different documents often end up using different characters for the same levels, which makes it very difficult to know which character to use when creating a new section.
#### Adding an image

Specifically, if when running `make docs` you get an error like:
```
docs/source/main_classes/trainer.rst:127:Title level inconsistent:
```
you picked an inconsistent character for some of the levels.
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If you are an external contributor, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.

But how do you know which characters you must use for an already existing level or when adding a new level?
## Styling the docstring

We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library

This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
easily.

# Testing documentation examples

Good documentation often comes with an example of how a specific function or class should be used.
Each model class should contain at least one example showcasing
how to use this model class in inference. *E.g.* the class [Wav2Vec2ForCTC](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC)
includes an example of how to transcribe speech to text in the
[docstring of its forward function](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.forward).

## Writing documentation examples

The syntax for Example docstrings can look as follows:

You can use this helper script:
```
perl -ne '/^(.)\1{100,}/ && do { $h{$1}=++$c if !$h{$1} }; END { %h = reverse %h ; print "$_ $h{$_}\n" for sort keys %h}' docs/source/main_classes/trainer.rst
1 -
2 ~
3 ^
4 =
5 "
Example:

```python
>>> from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
>>> from datasets import load_dataset
>>> import torch

>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate

>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
>>> predicted_ids = torch.argmax(logits, dim=-1)

>>> # transcribe speech
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription[0]
'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'
```
```

This tells you which characters have already been assigned for each level.
The docstring should give a minimal, clear example of how the respective model
is to be used in inference and also include the expected (ideally sensible)
output.
Often, readers will try out the example before even going through the function
or class definitions. Therefore it is of utmost importance that the example
works as expected.

So using this particular example's output -- if your current section's header uses `=` as its underline character, you now know you're at level 4, and if you want to add a sub-section header you know you want `"` as it'd be level 5.
## Docstring testing

If you needed to add yet another sub-level, then pick a character that is not used already. That is, you must pick a character that is not in the output of that script.
To do so, each example should be included in the doctests.
We use pytest's [doctest integration](https://docs.pytest.org/doctest.html) to verify that all of our examples run correctly.
For Transformers, the doctests are run on a daily basis via GitHub Actions as can be
seen [here](https://github.com/huggingface/transformers/actions/workflows/doctests.yml).

Here is the full list of characters that can be used in this context: `= - ` : ' " ~ ^ _ * + # < >`
To include your example in the daily doctests, you need to add the filename that
contains the example docstring to the [documentation_tests.txt](../utils/documentation_tests.txt).

### For Python files

You will first need to run the following command (from the root of the repository) to prepare the doc file (doc-testing needs to add additional lines that we don't include in the doc source files):

```bash
python utils/prepare_for_doc_test.py src docs
```

If you work on a specific python module, say `modeling_wav2vec2.py`, you can run the command as follows (to avoid the unnecessary temporary changes in irrelevant files):

```bash
python utils/prepare_for_doc_test.py src/transformers/utils/doc.py src/transformers/models/wav2vec2/modeling_wav2vec2.py
```
(`utils/doc.py` should always be included)

Then you can run all the tests in the docstrings of a given file with the following command (here is how we test the modeling file of Wav2Vec2, for instance):

```bash
pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py -sv --doctest-continue-on-failure
```

If you want to isolate a specific docstring, just add `::` after the file name then type the whole path of the function/class/method whose docstring you want to test. For instance, here is how to just test the forward method of `Wav2Vec2ForCTC`:

```bash
pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py::transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward -sv --doctest-continue-on-failure
```

Once you're done, you can run the following command (still from the root of the repository) to undo the changes made by the first command before committing:

```bash
python utils/prepare_for_doc_test.py src docs --remove_new_line
```

### For Markdown files

You will first need to run the following command (from the root of the repository) to prepare the doc file (doc-testing needs to add additional lines that we don't include in the doc source files):

```bash
python utils/prepare_for_doc_test.py src docs
```

Then you can test locally a given file with this command (here testing the quicktour):

```bash
pytest --doctest-modules docs/source/quicktour.mdx -sv --doctest-continue-on-failure --doctest-glob="*.mdx"
```

Once you're done, you can run the following command (still from the root of the repository) to undo the changes made by the first command before committing:

```bash
python utils/prepare_for_doc_test.py src docs --remove_new_line
```

### Writing doctests

Here are a few tips to help you debug the doctests and make them pass:

- The outputs of the code need to match the expected output **exactly**, so make sure you have the same outputs. In particular doctest will see a difference between single quotes and double quotes, or a missing parenthesis. The only exceptions to that rule are:
  * whitespace: one given whitespace (space, tabulation, new line) is equivalent to any number of whitespace, so you can add new lines where there are spaces to make your output more readable.
  * numerical values: you should never put more than 4 or 5 digits in expected results as different setups or library versions might get you slightly different results. `doctest` is configured to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits).
- Don't leave a block of code that is very long to execute. If you can't make it fast, you can either not use the doctest syntax on it (so that it's ignored), or if you want to use the doctest syntax to show the results, you can add a comment `# doctest: +SKIP` at the end of the lines of code too long to execute.
- Each line of code that produces a result needs to have that result written below. You can ignore an output if you don't want to show it in your code example by adding a comment ` # doctest: +IGNORE_RESULT` at the end of the line of code producing it. A short illustration combining these directives follows this list.

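To make those directives concrete, here is a minimal sketch of what such an example could look like. The checkpoint and the printed values are illustrative assumptions, not taken from an actual docstring in the library:

```python
>>> import torch
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

>>> inputs = tokenizer("Hello, world!", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
>>> logits.shape
torch.Size([1, 2])

>>> # Output we don't want to show in the example, so we ignore it:
>>> model.config.id2label  # doctest: +IGNORE_RESULT
>>> # A call that is only displayed, never executed by the doctest runner:
>>> model.save_pretrained("./local-checkpoint")  # doctest: +SKIP
```
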
@@ -6,4 +6,9 @@ INSTALL_CONTENT = """
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}

@@ -1,143 +0,0 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

How to add a pipeline to 🤗 Transformers?
=======================================================================================================================

First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes,
dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible
as it makes compatibility easier (even through other languages via JSON). Those will be the :obj:`inputs` of the
pipeline (:obj:`preprocess`).

Then define the :obj:`outputs`. Same policy as the :obj:`inputs`. The simpler, the better. Those will be the outputs of
the :obj:`postprocess` method.

Start by inheriting the base class :obj:`Pipeline` with the 4 methods needed to implement :obj:`preprocess`,
:obj:`_forward`, :obj:`postprocess` and :obj:`_sanitize_parameters`.


.. code-block::

    from transformers import Pipeline

    class MyPipeline(Pipeline):
        def _sanitize_parameters(self, **kwargs):
            preprocess_kwargs = {}
            if "maybe_arg" in kwargs:
                preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
            return preprocess_kwargs, {}, {}

        def preprocess(self, inputs, maybe_arg=2):
            model_input = Tensor(....)
            return {"model_input": model_input}

        def _forward(self, model_inputs):
            # model_inputs == {"model_input": model_input}
            outputs = self.model(**model_inputs)
            # Maybe {"logits": Tensor(...)}
            return outputs

        def postprocess(self, model_outputs):
            best_class = model_outputs["logits"].softmax(-1)
            return best_class


The structure of this breakdown is to support relatively seamless handling of CPU/GPU, while allowing
pre/postprocessing to be done on the CPU on different threads.

:obj:`preprocess` will take the originally defined inputs, and turn them into something feedable to the model. It might
contain more information and is usually a :obj:`Dict`.

:obj:`_forward` is the implementation detail and is not meant to be called directly. :obj:`forward` is the preferred
method to call as it contains safeguards to make sure everything is working on the expected device. If anything is
linked to a real model it belongs in the :obj:`_forward` method, anything else is in the preprocess/postprocess.

:obj:`postprocess` methods will take the output of :obj:`_forward` and turn it into the final output that was decided
earlier.

:obj:`_sanitize_parameters` exists to allow users to pass any parameters whenever they wish, be it at initialization
time ``pipeline(...., maybe_arg=4)`` or at call time ``pipe = pipeline(...); output = pipe(...., maybe_arg=4)``.

The returns of :obj:`_sanitize_parameters` are the 3 dicts of kwargs that will be passed directly to :obj:`preprocess`,
:obj:`_forward` and :obj:`postprocess`. Don't fill anything if the caller didn't call with any extra parameter. That
allows keeping the default arguments in the function definition, which is always more "natural".

A classic example would be a :obj:`top_k` argument in the post processing in classification tasks.

.. code-block::

    >>> pipe = pipeline("my-new-task")
    >>> pipe("This is a test")
    [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
    {"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]

    >>> pipe("This is a test", top_k=2)
    [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]

In order to achieve that, we'll update our :obj:`postprocess` method with a default parameter of :obj:`5` and edit
:obj:`_sanitize_parameters` to allow this new parameter.


.. code-block::

    def postprocess(self, model_outputs, top_k=5):
        best_class = model_outputs["logits"].softmax(-1)
        # Add logic to handle top_k
        return best_class

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "maybe_arg" in kwargs:
            preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]

        postprocess_kwargs = {}
        if "top_k" in kwargs:
            # The value goes into postprocess_kwargs so that it reaches `postprocess`.
            postprocess_kwargs["top_k"] = kwargs["top_k"]
        return preprocess_kwargs, {}, postprocess_kwargs

Try to keep the inputs/outputs very simple and ideally JSON-serializable, as it makes the pipeline usage very easy
without requiring users to understand new kinds of objects. It's also relatively common to support many different types
of arguments for ease of use (audio files can be filenames, URLs or pure bytes).

Adding it to the list of supported tasks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Go to ``src/transformers/pipelines/__init__.py`` and fill in :obj:`SUPPORTED_TASKS` with your newly created pipeline.
If possible it should provide a default model.

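As a rough illustration only: the exact keys of a :obj:`SUPPORTED_TASKS` entry may differ between versions, so mirror the
entries that already exist in the file rather than copying this sketch verbatim. The task name, pipeline class and
default checkpoint below are assumptions for the sake of the example.

.. code-block::

    # Illustrative sketch of an entry in src/transformers/pipelines/__init__.py
    SUPPORTED_TASKS["my-new-task"] = {
        "impl": MyPipeline,
        "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
        "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
        "default": {"model": {"pt": "distilbert-base-uncased-finetuned-sst-2-english"}},
        "type": "text",
    }
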
Adding tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Create a new file ``tests/test_pipelines_MY_PIPELINE.py``, following the example of the other tests.

The :obj:`run_pipeline_test` function will be very generic and run on small random models on every possible
architecture as defined by :obj:`model_mapping` and :obj:`tf_model_mapping`.

This is very important to test future compatibility, meaning if someone adds a new model for
:obj:`XXXForQuestionAnswering` then the pipeline test will attempt to run on it. Because the models are random it's
impossible to check for actual values, that's why there is a helper :obj:`ANY` that will simply attempt to match the
type of the pipeline output.

You also *need* to implement 2 (ideally 4) tests.

- :obj:`test_small_model_pt` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
  and test the pipeline outputs. The results should be the same as :obj:`test_small_model_tf`.
- :obj:`test_small_model_tf` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
  and test the pipeline outputs. The results should be the same as :obj:`test_small_model_pt`.
- :obj:`test_large_model_pt` (:obj:`optional`): Tests the pipeline on a real pipeline where the results are supposed to
  make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
  sure there is no drift in future releases.
- :obj:`test_large_model_tf` (:obj:`optional`): Tests the pipeline on a real pipeline where the results are supposed to
  make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
  sure there is no drift in future releases.

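As a rough sketch of what :obj:`test_small_model_pt` could look like -- the task name, the tiny checkpoint and the
expected output structure are illustrative assumptions, not the actual test scaffolding used in the repository:

.. code-block::

    import unittest

    from transformers import pipeline


    class MyNewTaskPipelineTests(unittest.TestCase):
        def test_small_model_pt(self):
            # Any small random checkpoint compatible with the task works here.
            pipe = pipeline("my-new-task", model="hf-internal-testing/tiny-random-distilbert", framework="pt")
            outputs = pipe("This is a test")
            # With random weights only the structure of the output is meaningful, not the values.
            self.assertIsInstance(outputs, list)
            self.assertIn("label", outputs[0])
            self.assertIn("score", outputs[0])
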
@@ -1,38 +0,0 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BERTology
-----------------------------------------------------------------------------------------------------------------------

There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT
(that some call "BERTology"). Some good examples of this field are:


* BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick:
  https://arxiv.org/abs/1905.05950
* Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
* What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D.
  Manning: https://arxiv.org/abs/1906.04341

In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to
help people access the inner representations, mainly adapted from the great work of Paul Michel
(https://arxiv.org/abs/1905.10650):


* accessing all the hidden-states of BERT/GPT/GPT-2,
* accessing all the attention weights for each head of BERT/GPT/GPT-2,
* retrieving heads output values and gradients to be able to compute head importance score and prune head as explained
  in https://arxiv.org/abs/1905.10650.

To help you understand and use these features, we have added a specific example script: :prefix_link:`bertology.py
<examples/research_projects/bertology/run_bertology.py>`, which extracts information and prunes a model pre-trained on
GLUE.
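For instance, the first two features are exposed through the standard ``output_hidden_states`` and ``output_attentions``
flags. The snippet below is a minimal sketch illustrating that, and is not part of the original page:

.. code-block::

    import torch
    from transformers import BertModel, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertModel.from_pretrained("bert-base-uncased", output_hidden_states=True, output_attentions=True)

    inputs = tokenizer("Hello, BERTology!", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # One tensor per layer (plus the embedding output for the hidden states).
    print(len(outputs.hidden_states), outputs.hidden_states[-1].shape)
    print(len(outputs.attentions), outputs.attentions[0].shape)  # (batch, heads, seq_len, seq_len)
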
@@ -1 +0,0 @@
../../CONTRIBUTING.md
@@ -1,181 +0,0 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Converting Tensorflow Checkpoints
=======================================================================================================================

A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models
that can be loaded using the ``from_pretrained`` methods of the library.

.. note::
    Since 2.3.0 the conversion script is now part of the transformers CLI (**transformers-cli**) available in any
    transformers >= 2.3.0 installation.

    The documentation below reflects the **transformers-cli convert** command format.

BERT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can convert any TensorFlow checkpoint for BERT (in particular `the pre-trained models released by Google
<https://github.com/google-research/bert#pre-trained-models>`_) in a PyTorch save file by using the
:prefix_link:`convert_bert_original_tf_checkpoint_to_pytorch.py
<src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py>` script.

This CLI takes as input a TensorFlow checkpoint (three files starting with ``bert_model.ckpt``) and the associated
configuration file (``bert_config.json``), and creates a PyTorch model for this configuration, loads the weights from
the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that can
be imported using ``from_pretrained()`` (see example in :doc:`quicktour` , :prefix_link:`run_glue.py
<examples/pytorch/text-classification/run_glue.py>` ).

You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
checkpoint (the three files starting with ``bert_model.ckpt``) but be sure to keep the configuration file (\
``bert_config.json``) and the vocabulary file (``vocab.txt``) as these are needed for the PyTorch model too.

To run this specific conversion script you will need to have TensorFlow and PyTorch installed (``pip install
tensorflow``). The rest of the repository only requires PyTorch.

Here is an example of the conversion process for a pre-trained ``BERT-Base Uncased`` model:

.. code-block:: shell

    export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

    transformers-cli convert --model_type bert \
      --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
      --config $BERT_BASE_DIR/bert_config.json \
      --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here
<https://github.com/google-research/bert#pre-trained-models>`__.

ALBERT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Convert TensorFlow model checkpoints of ALBERT to PyTorch using the
:prefix_link:`convert_albert_original_tf_checkpoint_to_pytorch.py
<src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py>` script.

The CLI takes as input a TensorFlow checkpoint (three files starting with ``model.ckpt-best``) and the accompanying
configuration file (``albert_config.json``), then creates and saves a PyTorch model. To run this conversion you will
need to have TensorFlow and PyTorch installed.

Here is an example of the conversion process for the pre-trained ``ALBERT Base`` model:

.. code-block:: shell

    export ALBERT_BASE_DIR=/path/to/albert/albert_base

    transformers-cli convert --model_type albert \
      --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
      --config $ALBERT_BASE_DIR/albert_config.json \
      --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here
<https://github.com/google-research/albert#pre-trained-models>`__.

OpenAI GPT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint
is saved in the same format as the OpenAI pretrained model (see `here <https://github.com/openai/finetune-transformer-lm>`__\
)

.. code-block:: shell

    export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

    transformers-cli convert --model_type gpt \
      --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config OPENAI_GPT_CONFIG] \
      [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \


OpenAI GPT-2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see `here
<https://github.com/openai/gpt-2>`__)

.. code-block:: shell

    export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights

    transformers-cli convert --model_type gpt2 \
      --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config OPENAI_GPT2_CONFIG] \
      [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]

Transformer-XL
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained Transformer-XL model (see `here
<https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models>`__)

.. code-block:: shell

    export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint

    transformers-cli convert --model_type transfo_xl \
      --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config TRANSFO_XL_CONFIG] \
      [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]


XLNet
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained XLNet model:

.. code-block:: shell

    export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
    export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config

    transformers-cli convert --model_type xlnet \
      --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
      --config $TRANSFO_XL_CONFIG_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--finetuning_task_name XLNET_FINETUNED_TASK] \


XLM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained XLM model:

.. code-block:: shell

    export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

    transformers-cli convert --model_type xlm \
      --tf_checkpoint $XLM_CHECKPOINT_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT
      [--config XML_CONFIG] \
      [--finetuning_task_name XML_FINETUNED_TASK]


T5
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained T5 model:

.. code-block:: shell

    export T5=/path/to/t5/uncased_L-12_H-768_A-12

    transformers-cli convert --model_type t5 \
      --tf_checkpoint $T5/t5_model.ckpt \
      --config $T5/t5_config.json \
      --pytorch_dump_output $T5/pytorch_model.bin
@@ -1,681 +0,0 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# How to fine-tune a model for common downstream tasks

[[open-in-colab]]

This guide will show you how to fine-tune 🤗 Transformers models for common downstream tasks. You will use the 🤗
Datasets library to quickly load and preprocess the datasets, getting them ready for training with PyTorch and
TensorFlow.

Before you begin, make sure you have the 🤗 Datasets library installed. For more detailed installation instructions,
refer to the 🤗 Datasets [installation page](https://huggingface.co/docs/datasets/installation.html). All of the
examples in this guide will use 🤗 Datasets to load and preprocess a dataset.

```bash
pip install datasets
```

Learn how to fine-tune a model for:

- [seq_imdb](#seq_imdb)
- [tok_ner](#tok_ner)
- [qa_squad](#qa_squad)

<a id='seq_imdb'></a>

## Sequence classification with IMDb reviews

Sequence classification refers to the task of classifying sequences of text according to a given number of classes. In
this example, learn how to fine-tune a model on the [IMDb dataset](https://huggingface.co/datasets/imdb) to determine
whether a review is positive or negative.

<Tip>

For a more in-depth example of how to fine-tune a model for text classification, take a look at the corresponding
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb)
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb).

</Tip>

### Load IMDb dataset

The 🤗 Datasets library makes it simple to load a dataset:

```python
from datasets import load_dataset

imdb = load_dataset("imdb")
```

This loads a `DatasetDict` object which you can index into to view an example:

```python
imdb["train"][0]
{'label': 1,
 'text': 'Bromwell High is a cartoon comedy. It ran at the same time as some other programs about school life, such as "Teachers". My 35 years in the teaching profession lead me to believe that Bromwell High\'s satire is much closer to reality than is "Teachers". The scramble to survive financially, the insightful students who can see right through their pathetic teachers\' pomp, the pettiness of the whole situation, all remind me of the schools I knew and their students. When I saw the episode in which a student repeatedly tried to burn down the school, I immediately recalled ......... at .......... High. A classic line: INSPECTOR: I\'m here to sack one of your teachers. STUDENT: Welcome to Bromwell High. I expect that many adults of my age think that Bromwell High is far fetched. What a pity that it isn\'t!'
}
```

### Preprocess

The next step is to tokenize the text into a format the model can read. It is important to load the same tokenizer a
model was trained with to ensure appropriately tokenized words. Load the DistilBERT tokenizer with the
[`AutoTokenizer`] because we will eventually train a classifier using a pretrained [DistilBERT](https://huggingface.co/distilbert-base-uncased) model:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
```

Now that you have instantiated a tokenizer, create a function that will tokenize the text. You should also truncate
longer sequences in the text to be no longer than the model's maximum input length:

```python
def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True)
```

Use 🤗 Datasets `map` function to apply the preprocessing function to the entire dataset. You can also set
`batched=True` to apply the preprocessing function to multiple elements of the dataset at once for faster
preprocessing:

```python
tokenized_imdb = imdb.map(preprocess_function, batched=True)
```

Lastly, pad your text so that it is a uniform length. While it is possible to pad your text in the `tokenizer` function
by setting `padding=True`, it is more efficient to only pad the text to the length of the longest element in its
batch. This is known as **dynamic padding**. You can do this with the `DataCollatorWithPadding` function:

```python
from transformers import DataCollatorWithPadding

data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
```

### Fine-tune with the Trainer API

Now load your model with the [`AutoModelForSequenceClassification`] class along with the number of expected labels:

```python
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
```

At this point, only three steps remain:

1. Define your training hyperparameters in [`TrainingArguments`].
2. Pass the training arguments to a [`Trainer`] along with the model, dataset, tokenizer, and data collator.
3. Call [`Trainer.train()`] to fine-tune your model.

```python
from transformers import TrainingArguments, Trainer

training_args = TrainingArguments(
    output_dir='./results',
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=5,
    weight_decay=0.01,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_imdb["train"],
    eval_dataset=tokenized_imdb["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
)

trainer.train()
```

### Fine-tune with TensorFlow

Fine-tuning with TensorFlow is just as easy, with only a few differences.

Start by batching the processed examples together with dynamic padding using the [`DataCollatorWithPadding`] function.
Make sure you set `return_tensors="tf"` to return `tf.Tensor` outputs instead of PyTorch tensors!

```python
from transformers import DataCollatorWithPadding

data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf")
```

Next, convert your datasets to the `tf.data.Dataset` format with `to_tf_dataset`. Specify inputs and labels in the
|
||||
`columns` argument:
|
||||
|
||||
```python
|
||||
tf_train_dataset = tokenized_imdb["train"].to_tf_dataset(
|
||||
columns=['attention_mask', 'input_ids', 'label'],
|
||||
shuffle=True,
|
||||
batch_size=16,
|
||||
collate_fn=data_collator,
|
||||
)
|
||||
|
||||
tf_validation_dataset = tokenized_imdb["train"].to_tf_dataset(
|
||||
columns=['attention_mask', 'input_ids', 'label'],
|
||||
shuffle=False,
|
||||
batch_size=16,
|
||||
collate_fn=data_collator,
|
||||
)
|
||||
```
|
||||
|
||||
Set up an optimizer function, learning rate schedule, and some training hyperparameters:
|
||||
|
||||
```python
|
||||
from transformers import create_optimizer
|
||||
import tensorflow as tf
|
||||
|
||||
batch_size = 16
|
||||
num_epochs = 5
|
||||
batches_per_epoch = len(tokenized_imdb["train"]) // batch_size
|
||||
total_train_steps = int(batches_per_epoch * num_epochs)
|
||||
optimizer, schedule = create_optimizer(
|
||||
init_lr=2e-5,
|
||||
num_warmup_steps=0,
|
||||
num_train_steps=total_train_steps
|
||||
)
|
||||
```
|
||||
|
||||
Load your model with the [`TFAutoModelForSequenceClassification`] class along with the number of expected labels:
|
||||
|
||||
```python
|
||||
from transformers import TFAutoModelForSequenceClassification
|
||||
model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
|
||||
```
|
||||
|
||||
Compile the model:
|
||||
|
||||
```python
|
||||
import tensorflow as tf
|
||||
model.compile(optimizer=optimizer)
|
||||
```
|
||||
|
||||
Finally, fine-tune the model by calling `model.fit`:
|
||||
|
||||
```python
|
||||
model.fit(
    tf_train_dataset,
    validation_data=tf_validation_dataset,
    epochs=num_epochs,
)
|
||||
```
|
||||
|
||||
<a id='tok_ner'></a>
|
||||
|
||||
## Token classification with WNUT emerging entities
|
||||
|
||||
Token classification refers to the task of classifying individual tokens in a sentence. One of the most common token
|
||||
classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence,
|
||||
such as a person, location, or organization. In this example, learn how to fine-tune a model on the [WNUT 17](https://huggingface.co/datasets/wnut_17) dataset to detect new entities.
|
||||
|
||||
<Tip>
|
||||
|
||||
For a more in-depth example of how to fine-tune a model for token classification, take a look at the corresponding
|
||||
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb)
|
||||
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
### Load WNUT 17 dataset
|
||||
|
||||
Load the WNUT 17 dataset from the 🤗 Datasets library:
|
||||
|
||||
```python
|
||||
from datasets import load_dataset
|
||||
wnut = load_dataset("wnut_17")
|
||||
```
|
||||
|
||||
A quick look at the dataset shows the labels associated with each word in the sentence:
|
||||
|
||||
```python
|
||||
wnut["train"][0]
|
||||
{'id': '0',
|
||||
'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.']
|
||||
}
|
||||
```
|
||||
|
||||
View the names that these `ner_tags` integers correspond to:
|
||||
|
||||
```python
|
||||
label_list = wnut["train"].features["ner_tags"].feature.names
|
||||
label_list
|
||||
['O',
|
||||
'B-corporation',
|
||||
'I-corporation',
|
||||
'B-creative-work',
|
||||
'I-creative-work',
|
||||
'B-group',
|
||||
'I-group',
|
||||
'B-location',
|
||||
'I-location',
|
||||
'B-person',
|
||||
'I-person',
|
||||
'B-product',
|
||||
'I-product'
|
||||
]
|
||||
```
|
||||
|
||||
A letter prefixes each NER tag, with the following meaning:

- `B-` indicates the beginning of an entity.
- `I-` indicates a token is contained inside the same entity (e.g., the `State` token is a part of an entity like `Empire State Building`).
- `O` indicates the token doesn't correspond to any entity.
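To see how these prefixes map onto the example above, you can pair each word with the name of its tag (this only uses the `wnut` dataset and `label_list` from the previous steps):

```python
example = wnut["train"][0]

# Pair each word with the name of its NER tag
word_tags = [(word, label_list[tag]) for word, tag in zip(example["tokens"], example["ner_tags"])]
word_tags[14:17]
[('Empire', 'B-location'), ('State', 'I-location'), ('Building', 'I-location')]
```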
|
||||
|
||||
### Preprocess
|
||||
|
||||
Now you need to tokenize the text. Load the DistilBERT tokenizer with an [`AutoTokenizer`]:
|
||||
|
||||
```python
|
||||
from transformers import AutoTokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Since the input has already been split into words, set `is_split_into_words=True` to tokenize the words into
|
||||
subwords:
|
||||
|
||||
```python
|
||||
tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
|
||||
tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
|
||||
tokens
|
||||
['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]']
|
||||
```
|
||||
|
||||
The addition of the special tokens `[CLS]` and `[SEP]` and subword tokenization creates a mismatch between the
|
||||
input and labels. Realign the labels and tokens by:
|
||||
|
||||
1. Mapping all tokens to their corresponding word with the `word_ids` method.
|
||||
2. Assigning the label `-100` to the special tokens `[CLS]` and `[SEP]` so the PyTorch loss function ignores
|
||||
them.
|
||||
3. Only labeling the first token of a given word. Assign `-100` to the other subtokens from the same word.
|
||||
|
||||
Here is how you can create a function that will realign the labels and tokens:
|
||||
|
||||
```python
|
||||
def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)

    labels = []
    for i, label in enumerate(examples["ner_tags"]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)  # Map tokens to their respective word.
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            if word_idx is None:  # Set the special tokens to -100.
                label_ids.append(-100)
            elif word_idx != previous_word_idx:  # Only label the first token of a given word.
                label_ids.append(label[word_idx])
            else:  # Assign -100 to the other subtokens of the same word.
                label_ids.append(-100)
            previous_word_idx = word_idx
        labels.append(label_ids)

    tokenized_inputs["labels"] = labels
    return tokenized_inputs
|
||||
```
|
||||
|
||||
Now tokenize and align the labels over the entire dataset with 🤗 Datasets `map` function:
|
||||
|
||||
```python
|
||||
tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)
|
||||
```
|
||||
|
||||
Finally, pad your text and labels, so they are a uniform length:
|
||||
|
||||
```python
|
||||
from transformers import DataCollatorForTokenClassification
|
||||
data_collator = DataCollatorForTokenClassification(tokenizer)
|
||||
```
|
||||
|
||||
### Fine-tune with the Trainer API
|
||||
|
||||
Load your model with the [`AutoModelForTokenClassification`] class along with the number of expected labels:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
|
||||
model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=len(label_list))
|
||||
```
|
||||
|
||||
Gather your training arguments in [`TrainingArguments`]:
|
||||
|
||||
```python
|
||||
training_args = TrainingArguments(
|
||||
output_dir='./results',
|
||||
evaluation_strategy="epoch",
|
||||
learning_rate=2e-5,
|
||||
per_device_train_batch_size=16,
|
||||
per_device_eval_batch_size=16,
|
||||
num_train_epochs=3,
|
||||
weight_decay=0.01,
|
||||
)
|
||||
```
|
||||
|
||||
Collect your model, training arguments, dataset, data collator, and tokenizer in [`Trainer`]:
|
||||
|
||||
```python
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=training_args,
|
||||
train_dataset=tokenized_wnut["train"],
|
||||
eval_dataset=tokenized_wnut["test"],
|
||||
data_collator=data_collator,
|
||||
tokenizer=tokenizer,
|
||||
)
|
||||
```
|
||||
|
||||
Fine-tune your model:
|
||||
|
||||
```python
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
### Fine-tune with TensorFlow
|
||||
|
||||
Batch your examples together and pad your text and labels, so they are a uniform length:
|
||||
|
||||
```python
|
||||
from transformers import DataCollatorForTokenClassification
|
||||
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="tf")
|
||||
```
|
||||
|
||||
Convert your datasets to the `tf.data.Dataset` format with `to_tf_dataset`:
|
||||
|
||||
```python
|
||||
tf_train_set = tokenized_wnut["train"].to_tf_dataset(
|
||||
columns=["attention_mask", "input_ids", "labels"],
|
||||
shuffle=True,
|
||||
batch_size=16,
|
||||
collate_fn=data_collator,
|
||||
)
|
||||
|
||||
tf_validation_set = tokenized_wnut["validation"].to_tf_dataset(
|
||||
columns=["attention_mask", "input_ids", "labels"],
|
||||
shuffle=False,
|
||||
batch_size=16,
|
||||
collate_fn=data_collator,
|
||||
)
|
||||
```
|
||||
|
||||
Load the model with the [`TFAutoModelForTokenClassification`] class along with the number of expected labels:
|
||||
|
||||
```python
|
||||
from transformers import TFAutoModelForTokenClassification
|
||||
model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=len(label_list))
|
||||
```
|
||||
|
||||
Set up an optimizer function, learning rate schedule, and some training hyperparameters:
|
||||
|
||||
```python
|
||||
from transformers import create_optimizer
|
||||
|
||||
batch_size = 16
|
||||
num_train_epochs = 3
|
||||
num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs
|
||||
optimizer, lr_schedule = create_optimizer(
|
||||
init_lr=2e-5,
|
||||
num_train_steps=num_train_steps,
|
||||
weight_decay_rate=0.01,
|
||||
num_warmup_steps=0,
|
||||
)
|
||||
```
|
||||
|
||||
Compile the model:
|
||||
|
||||
```python
|
||||
import tensorflow as tf
|
||||
model.compile(optimizer=optimizer)
|
||||
```
|
||||
|
||||
Call `model.fit` to fine-tune your model:
|
||||
|
||||
```python
|
||||
model.fit(
|
||||
tf_train_set,
|
||||
validation_data=tf_validation_set,
|
||||
epochs=num_train_epochs,
|
||||
)
|
||||
```
|
||||
|
||||
<a id='qa_squad'></a>
|
||||
|
||||
## Question Answering with SQuAD
|
||||
|
||||
There are many types of question answering (QA) tasks. Extractive QA focuses on identifying the answer from the text
|
||||
given a question. In this example, learn how to fine-tune a model on the [SQuAD](https://huggingface.co/datasets/squad) dataset.
|
||||
|
||||
<Tip>
|
||||
|
||||
For a more in-depth example of how to fine-tune a model for question answering, take a look at the corresponding
|
||||
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering.ipynb)
|
||||
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/question_answering-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
### Load SQuAD dataset
|
||||
|
||||
Load the SQuAD dataset from the 🤗 Datasets library:
|
||||
|
||||
```python
|
||||
from datasets import load_dataset
|
||||
squad = load_dataset("squad")
|
||||
```
|
||||
|
||||
Take a look at an example from the dataset:
|
||||
|
||||
```python
|
||||
squad["train"][0]
|
||||
{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']},
|
||||
'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.',
|
||||
'id': '5733be284776f41900661182',
|
||||
'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?',
|
||||
'title': 'University_of_Notre_Dame'
|
||||
}
|
||||
```
|
||||
|
||||
### Preprocess
|
||||
|
||||
Load the DistilBERT tokenizer with an [`AutoTokenizer`]:
|
||||
|
||||
```python
|
||||
from transformers import AutoTokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
There are a few things to be aware of when preprocessing text for question answering:
|
||||
|
||||
1. Some examples in a dataset may have a very long `context` that exceeds the maximum input length of the model. You
|
||||
can deal with this by truncating only the `context` with `truncation="only_second"`.
|
||||
2. Next, you need to map the start and end positions of the answer to the original context. Set
|
||||
`return_offsets_mapping=True` to handle this.
|
||||
3. With the mapping in hand, you can find the start and end tokens of the answer. Use the `sequence_ids` method to
|
||||
find which part of the offset corresponds to the question, and which part of the offset corresponds to the context.
|
||||
|
||||
Assemble everything in a preprocessing function as shown below:
|
||||
|
||||
```python
|
||||
def preprocess_function(examples):
|
||||
questions = [q.strip() for q in examples["question"]]
|
||||
inputs = tokenizer(
|
||||
questions,
|
||||
examples["context"],
|
||||
max_length=384,
|
||||
truncation="only_second",
|
||||
return_offsets_mapping=True,
|
||||
padding="max_length",
|
||||
)
|
||||
|
||||
offset_mapping = inputs.pop("offset_mapping")
|
||||
answers = examples["answers"]
|
||||
start_positions = []
|
||||
end_positions = []
|
||||
|
||||
for i, offset in enumerate(offset_mapping):
|
||||
answer = answers[i]
|
||||
start_char = answer["answer_start"][0]
|
||||
end_char = answer["answer_start"][0] + len(answer["text"][0])
|
||||
sequence_ids = inputs.sequence_ids(i)
|
||||
|
||||
# Find the start and end of the context
|
||||
idx = 0
|
||||
while sequence_ids[idx] != 1:
|
||||
idx += 1
|
||||
context_start = idx
|
||||
while sequence_ids[idx] == 1:
|
||||
idx += 1
|
||||
context_end = idx - 1
|
||||
|
||||
# If the answer is not fully inside the context, label it (0, 0)
|
||||
if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
|
||||
start_positions.append(0)
|
||||
end_positions.append(0)
|
||||
else:
|
||||
# Otherwise it's the start and end token positions
|
||||
idx = context_start
|
||||
while idx <= context_end and offset[idx][0] <= start_char:
|
||||
idx += 1
|
||||
start_positions.append(idx - 1)
|
||||
|
||||
idx = context_end
|
||||
while idx >= context_start and offset[idx][1] >= end_char:
|
||||
idx -= 1
|
||||
end_positions.append(idx + 1)
|
||||
|
||||
inputs["start_positions"] = start_positions
|
||||
inputs["end_positions"] = end_positions
|
||||
return inputs
|
||||
```
|
||||
|
||||
Apply the preprocessing function over the entire dataset with 🤗 Datasets `map` function:
|
||||
|
||||
```python
|
||||
tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names)
|
||||
```
|
||||
|
||||
Batch the processed examples together:
|
||||
|
||||
```python
|
||||
from transformers import default_data_collator
|
||||
data_collator = default_data_collator
|
||||
```
|
||||
|
||||
### Fine-tune with the Trainer API
|
||||
|
||||
Load your model with the [`AutoModelForQuestionAnswering`] class:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer
|
||||
model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Gather your training arguments in [`TrainingArguments`]:
|
||||
|
||||
```python
|
||||
training_args = TrainingArguments(
|
||||
output_dir='./results',
|
||||
evaluation_strategy="epoch",
|
||||
learning_rate=2e-5,
|
||||
per_device_train_batch_size=16,
|
||||
per_device_eval_batch_size=16,
|
||||
num_train_epochs=3,
|
||||
weight_decay=0.01,
|
||||
)
|
||||
```
|
||||
|
||||
Collect your model, training arguments, dataset, data collator, and tokenizer in [`Trainer`]:
|
||||
|
||||
```python
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=training_args,
|
||||
train_dataset=tokenized_squad["train"],
|
||||
eval_dataset=tokenized_squad["validation"],
|
||||
data_collator=data_collator,
|
||||
tokenizer=tokenizer,
|
||||
)
|
||||
```
|
||||
|
||||
Fine-tune your model:
|
||||
|
||||
```python
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
### Fine-tune with TensorFlow
|
||||
|
||||
Batch the processed examples together with a TensorFlow default data collator:
|
||||
|
||||
```python
|
||||
from transformers import DefaultDataCollator

data_collator = DefaultDataCollator(return_tensors="tf")
|
||||
```
|
||||
|
||||
Convert your datasets to the `tf.data.Dataset` format with the `to_tf_dataset` function:
|
||||
|
||||
```python
|
||||
tf_train_set = tokenized_squad["train"].to_tf_dataset(
|
||||
columns=["attention_mask", "input_ids", "start_positions", "end_positions"],
|
||||
dummy_labels=True,
|
||||
shuffle=True,
|
||||
batch_size=16,
|
||||
collate_fn=data_collator,
|
||||
)
|
||||
|
||||
tf_validation_set = tokenized_squad["validation"].to_tf_dataset(
|
||||
columns=["attention_mask", "input_ids", "start_positions", "end_positions"],
|
||||
dummy_labels=True,
|
||||
shuffle=False,
|
||||
batch_size=16,
|
||||
collate_fn=data_collator,
|
||||
)
|
||||
```
|
||||
|
||||
Set up an optimizer function, learning rate schedule, and some training hyperparameters:
|
||||
|
||||
```python
|
||||
from transformers import create_optimizer
|
||||
|
||||
batch_size = 16
|
||||
num_epochs = 2
|
||||
total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs
|
||||
optimizer, schedule = create_optimizer(
|
||||
init_lr=2e-5,
|
||||
num_warmup_steps=0,
|
||||
num_train_steps=total_train_steps,
|
||||
)
|
||||
```
|
||||
|
||||
Load your model with the [`TFAutoModelForQuestionAnswering`] class:
|
||||
|
||||
```python
|
||||
from transformers import TFAutoModelForQuestionAnswering
|
||||
model = TFAutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Compile the model:
|
||||
|
||||
```python
|
||||
import tensorflow as tf
|
||||
model.compile(optimizer=optimizer)
|
||||
```
|
||||
|
||||
Call `model.fit` to fine-tune the model:
|
||||
|
||||
```python
|
||||
model.fit(
|
||||
tf_train_set,
|
||||
validation_data=tf_validation_set,
|
||||
    epochs=num_epochs,
|
||||
)
|
||||
```
|
||||
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
|
||||
|
||||
Debugging
|
||||
=======================================================================================================================
|
||||
|
||||
Underflow and Overflow Detection
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. note::
|
||||
|
||||
This feature is currently available for PyTorch only.
|
||||
|
||||
.. note::
|
||||
|
||||
For multi-GPU training it requires DDP (``torch.distributed.launch``).
|
||||
|
||||
.. note::
|
||||
|
||||
This feature can be used with any ``nn.Module``-based model.
|
||||
|
||||
If you start getting ``loss=NaN`` or the model exhibits some other abnormal behavior due to ``inf`` or ``nan`` in
activations or weights, you need to discover where the first underflow or overflow happens and what led to it. Luckily
you can accomplish that easily by activating a special module that will do the detection automatically.
|
||||
|
||||
If you're using :class:`~transformers.Trainer`, you just need to add:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
--debug underflow_overflow
|
||||
|
||||
to the normal command line arguments, or pass ``debug="underflow_overflow"`` when creating the
|
||||
:class:`~transformers.TrainingArguments` object.
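For example, a minimal sketch of enabling it when building the :class:`~transformers.TrainingArguments` (``output_dir`` here is just a placeholder):

.. code-block:: python

    from transformers import TrainingArguments

    training_args = TrainingArguments(
        output_dir="output_dir",
        debug="underflow_overflow",  # activate the underflow/overflow detector
    )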
|
||||
|
||||
If you're using your own training loop or another Trainer you can accomplish the same with:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.debug_utils import DebugUnderflowOverflow
|
||||
debug_overflow = DebugUnderflowOverflow(model)
|
||||
|
||||
:class:`~transformers.debug_utils.DebugUnderflowOverflow` inserts hooks into the model that immediately after each
|
||||
forward call will test input and output variables and also the corresponding module's weights. As soon as ``inf`` or
|
||||
``nan`` is detected in at least one element of the activations or weights, the program will assert and print a report
|
||||
like this (this was caught with ``google/mt5-small`` under fp16 mixed precision):
|
||||
|
||||
.. code-block::
|
||||
|
||||
Detected inf/nan during batch_number=0
|
||||
Last 21 forward frames:
|
||||
abs min abs max metadata
|
||||
encoder.block.1.layer.1.DenseReluDense.dropout Dropout
|
||||
0.00e+00 2.57e+02 input[0]
|
||||
0.00e+00 2.85e+02 output
|
||||
[...]
|
||||
encoder.block.2.layer.0 T5LayerSelfAttention
|
||||
6.78e-04 3.15e+03 input[0]
|
||||
2.65e-04 3.42e+03 output[0]
|
||||
None output[1]
|
||||
2.25e-01 1.00e+04 output[2]
|
||||
encoder.block.2.layer.1.layer_norm T5LayerNorm
|
||||
8.69e-02 4.18e-01 weight
|
||||
2.65e-04 3.42e+03 input[0]
|
||||
1.79e-06 4.65e+00 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
|
||||
2.17e-07 4.50e+00 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
2.68e-06 3.70e+01 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
|
||||
8.08e-07 2.66e+01 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
1.27e-04 2.37e+02 output
|
||||
encoder.block.2.layer.1.DenseReluDense.dropout Dropout
|
||||
0.00e+00 8.76e+03 input[0]
|
||||
0.00e+00 9.74e+03 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wo Linear
|
||||
1.01e-06 6.44e+00 weight
|
||||
0.00e+00 9.74e+03 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.dropout Dropout
|
||||
3.18e-04 6.27e+04 input[0]
|
||||
0.00e+00 inf output
|
||||
|
||||
The example output has been trimmed in the middle for brevity.
|
||||
|
||||
The second column shows the value of the absolute largest element, so if you have a closer look at the last few frames,
|
||||
the inputs and outputs were in the range of ``1e4``. So when this training was done under fp16 mixed precision the very
|
||||
last step overflowed (since under ``fp16`` the largest number before ``inf`` is ``64e3``). To avoid overflows under
|
||||
``fp16`` the activations must remain way below ``1e4``, because ``1e4 * 1e4 = 1e8`` so any matrix multiplication with
|
||||
large activations is going to lead to a numerical overflow condition.
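To make that limit concrete, here is a quick sanity check you can run (assuming PyTorch is installed):

.. code-block:: python

    import torch

    print(torch.finfo(torch.float16).max)             # 65504.0, i.e. roughly 64e3
    print(torch.tensor(70000.0).to(torch.float16))    # tensor(inf, dtype=torch.float16)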
|
||||
|
||||
At the very start of the trace you can discover at which batch number the problem occurred (here ``Detected inf/nan
|
||||
during batch_number=0`` means the problem occurred on the first batch).
|
||||
|
||||
Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting
|
||||
for. If we look just at this frame:
|
||||
|
||||
.. code-block::
|
||||
|
||||
encoder.block.2.layer.1.layer_norm T5LayerNorm
|
||||
8.69e-02 4.18e-01 weight
|
||||
2.65e-04 3.42e+03 input[0]
|
||||
1.79e-06 4.65e+00 output
|
||||
|
||||
Here, ``encoder.block.2.layer.1.layer_norm`` indicates that it was a layer norm for the first layer of the second
block of the encoder, and that the specific ``forward`` call was made by ``T5LayerNorm``.
|
||||
|
||||
Let's look at the last few frames of that report:
|
||||
|
||||
.. code-block::
|
||||
|
||||
Detected inf/nan during batch_number=0
|
||||
Last 21 forward frames:
|
||||
abs min abs max metadata
|
||||
[...]
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
|
||||
2.17e-07 4.50e+00 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
2.68e-06 3.70e+01 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
|
||||
8.08e-07 2.66e+01 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
1.27e-04 2.37e+02 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wo Linear
|
||||
1.01e-06 6.44e+00 weight
|
||||
0.00e+00 9.74e+03 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.dropout Dropout
|
||||
3.18e-04 6.27e+04 input[0]
|
||||
0.00e+00 inf output
|
||||
|
||||
The last frame reports on the ``Dropout.forward`` function, with the first entry for its only input and the second for its
only output. You can see that it was called from the ``dropout`` attribute inside the ``DenseReluDense`` class. We can see
that it happened during the first layer of the 2nd block, on the very first batch. Finally, the absolute largest
input element was ``6.27e+04`` and the same for the output was ``inf``.
|
||||
|
||||
You can see here, that ``T5DenseGatedGeluDense.forward`` resulted in output activations, whose absolute max value was
|
||||
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have ``Dropout`` which renormalizes
|
||||
the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an
|
||||
overflow (``inf``).
|
||||
|
||||
As you can see, it's the previous frames that we need to look into when the numbers start getting very large for fp16.
|
||||
|
||||
Let's match the report to the code from ``models/t5/modeling_t5.py``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class T5DenseGatedGeluDense(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
|
||||
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
|
||||
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
|
||||
self.dropout = nn.Dropout(config.dropout_rate)
|
||||
self.gelu_act = ACT2FN["gelu_new"]
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
|
||||
hidden_linear = self.wi_1(hidden_states)
|
||||
hidden_states = hidden_gelu * hidden_linear
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.wo(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
Now it's easy to see the ``dropout`` call, and all the previous calls as well.
|
||||
|
||||
Since the detection is happening in a forward hook, these reports are printed immediately after each ``forward``
|
||||
returns.
|
||||
|
||||
Going back to the full report, to act on it and to fix the problem, we need to go a few frames up where the numbers
|
||||
started to go up and most likely switch to the ``fp32`` mode here, so that the numbers don't overflow when multiplied
|
||||
or summed up. Of course, there might be other solutions. For example, we could turn off ``amp`` temporarily if it's
|
||||
enabled, after moving the original ``forward`` into a helper wrapper, like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def _forward(self, hidden_states):
|
||||
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
|
||||
hidden_linear = self.wi_1(hidden_states)
|
||||
hidden_states = hidden_gelu * hidden_linear
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.wo(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
import torch
|
||||
def forward(self, hidden_states):
|
||||
if torch.is_autocast_enabled():
|
||||
with torch.cuda.amp.autocast(enabled=False):
|
||||
return self._forward(hidden_states)
|
||||
else:
|
||||
return self._forward(hidden_states)
|
||||
|
||||
Since the automatic detector only reports on inputs and outputs of full frames, once you know where to look, you may
|
||||
want to analyse the intermediary stages of any specific ``forward`` function as well. In such a case you can use the
|
||||
``detect_overflow`` helper function to inject the detector where you want it, for example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.debug_utils import detect_overflow
|
||||
|
||||
class T5LayerFF(nn.Module):
|
||||
[...]
|
||||
def forward(self, hidden_states):
|
||||
forwarded_states = self.layer_norm(hidden_states)
|
||||
detect_overflow(forwarded_states, "after layer_norm")
|
||||
forwarded_states = self.DenseReluDense(forwarded_states)
|
||||
detect_overflow(forwarded_states, "after DenseReluDense")
|
||||
return hidden_states + self.dropout(forwarded_states)
|
||||
|
||||
You can see that we added 2 of these and now we track if ``inf`` or ``nan`` for ``forwarded_states`` was detected
|
||||
somewhere in between.
|
||||
|
||||
Actually, the detector already reports these because each of the calls in the example above is an ``nn.Module``, but
if you had some local direct calculations, this is how you'd track them.
|
||||
|
||||
Additionally, if you're instantiating the debugger in your own code, you can adjust the number of frames printed from
|
||||
its default, e.g.:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.debug_utils import DebugUnderflowOverflow
|
||||
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
|
||||
|
||||
Specific batch absolute min and max value tracing
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The same debugging class can be used for per-batch tracing with the underflow/overflow detection feature turned off.
|
||||
|
||||
Let's say you want to watch the absolute min and max values for all the ingredients of each ``forward`` call of a given
|
||||
batch, and only do that for batches 1 and 3. Then you instantiate this class as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])
|
||||
|
||||
And now full batches 1 and 3 will be traced using the same format as the underflow/overflow detector does.
|
||||
|
||||
Batches are 0-indexed.
|
||||
|
||||
This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward
|
||||
right to that area. Here is a sample truncated output for such configuration:
|
||||
|
||||
.. code-block::
|
||||
|
||||
*** Starting batch number=1 ***
|
||||
abs min abs max metadata
|
||||
shared Embedding
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 2.47e+04 input[0]
|
||||
5.36e-05 7.92e+02 output
|
||||
[...]
|
||||
decoder.dropout Dropout
|
||||
1.60e-07 2.27e+01 input[0]
|
||||
0.00e+00 2.52e+01 output
|
||||
decoder T5Stack
|
||||
not a tensor output
|
||||
lm_head Linear
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 1.11e+00 input[0]
|
||||
6.06e-02 8.39e+01 output
|
||||
T5ForConditionalGeneration
|
||||
not a tensor output
|
||||
|
||||
*** Starting batch number=3 ***
|
||||
abs min abs max metadata
|
||||
shared Embedding
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 2.78e+04 input[0]
|
||||
5.36e-05 7.92e+02 output
|
||||
[...]
|
||||
|
||||
Here you will get a huge number of frames dumped - as many as there were forward calls in your model - so it may or may
not be what you want, but sometimes it can be easier to use for debugging than a normal debugger. For example, if
a problem starts happening at batch number 150, you can dump traces for batches 149 and 150 and compare where the
numbers started to diverge.
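In that scenario you could instantiate the debugger as:

.. code-block:: python

    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[149, 150])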
|
||||
|
||||
You can also specify the batch number after which to stop the training, with:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)
|
||||
docs/source/en/_config.py (new file, 14 lines)
|
||||
# docstyle-ignore
|
||||
INSTALL_CONTENT = """
|
||||
# Transformers installation
|
||||
! pip install transformers datasets
|
||||
# To install from source instead of the last release, comment the command above and uncomment the following one.
|
||||
# ! pip install git+https://github.com/huggingface/transformers.git
|
||||
"""
|
||||
|
||||
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
|
||||
black_avoid_patterns = {
|
||||
"{processor_class}": "FakeProcessorClass",
|
||||
"{model_class}": "FakeModelClass",
|
||||
"{object_class}": "FakeObjectClass",
|
||||
}
|
||||
@ -5,73 +5,105 @@
|
||||
title: Quick tour
|
||||
- local: installation
|
||||
title: Installation
|
||||
- local: philosophy
|
||||
title: Philosophy
|
||||
- local: glossary
|
||||
title: Glossary
|
||||
title: Get started
|
||||
- sections:
|
||||
- local: task_summary
|
||||
title: Summary of the tasks
|
||||
- local: model_summary
|
||||
title: Summary of the models
|
||||
- local: pipeline_tutorial
|
||||
title: Pipelines for inference
|
||||
- local: autoclass_tutorial
|
||||
title: Load pretrained instances with an AutoClass
|
||||
- local: preprocessing
|
||||
title: Preprocessing data
|
||||
title: Preprocess
|
||||
- local: training
|
||||
title: Fine-tuning a pretrained model
|
||||
title: Fine-tune a pretrained model
|
||||
- local: accelerate
|
||||
title: Distributed training with 🤗 Accelerate
|
||||
- local: model_sharing
|
||||
title: Model sharing and uploading
|
||||
- local: tokenizer_summary
|
||||
title: Summary of the tokenizers
|
||||
- local: multilingual
|
||||
title: Multi-lingual models
|
||||
title: "Using 🤗 Transformers"
|
||||
title: Share a model
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- local: examples
|
||||
title: Examples
|
||||
- local: troubleshooting
|
||||
title: Troubleshooting
|
||||
- local: custom_datasets
|
||||
title: Fine-tuning with custom datasets
|
||||
- local: notebooks
|
||||
title: "🤗 Transformers Notebooks"
|
||||
- local: fast_tokenizers
|
||||
title: "Use tokenizers from 🤗 Tokenizers"
|
||||
- local: create_a_model
|
||||
title: Create a custom architecture
|
||||
- local: custom_models
|
||||
title: Sharing custom models
|
||||
- sections:
|
||||
- local: tasks/sequence_classification
|
||||
title: Text classification
|
||||
- local: tasks/token_classification
|
||||
title: Token classification
|
||||
- local: tasks/question_answering
|
||||
title: Question answering
|
||||
- local: tasks/language_modeling
|
||||
title: Language modeling
|
||||
- local: tasks/translation
|
||||
title: Translation
|
||||
- local: tasks/summarization
|
||||
title: Summarization
|
||||
- local: tasks/multiple_choice
|
||||
title: Multiple choice
|
||||
- local: tasks/audio_classification
|
||||
title: Audio classification
|
||||
- local: tasks/asr
|
||||
title: Automatic speech recognition
|
||||
- local: tasks/image_classification
|
||||
title: Image classification
|
||||
title: Fine-tune for downstream tasks
|
||||
- local: run_scripts
|
||||
title: Train with a script
|
||||
- local: sagemaker
|
||||
title: Run training on Amazon SageMaker
|
||||
- local: community
|
||||
title: Community
|
||||
- local: multilingual
|
||||
title: Inference for multilingual models
|
||||
- local: converting_tensorflow_models
|
||||
title: Converting Tensorflow Checkpoints
|
||||
title: Converting TensorFlow Checkpoints
|
||||
- local: serialization
|
||||
title: Export 🤗 Transformers models
|
||||
- local: performance
|
||||
title: 'Performance and Scalability: How To Fit a Bigger Model and Train It Faster'
|
||||
- local: parallelism
|
||||
title: Model Parallelism
|
||||
- local: benchmarks
|
||||
title: Benchmarks
|
||||
- local: migration
|
||||
title: Migrating from previous packages
|
||||
- local: troubleshooting
|
||||
title: Troubleshoot
|
||||
- local: debugging
|
||||
title: Debugging
|
||||
- local: notebooks
|
||||
title: "🤗 Transformers Notebooks"
|
||||
- local: community
|
||||
title: Community
|
||||
- local: contributing
|
||||
title: How to contribute to transformers?
|
||||
- local: add_new_model
|
||||
title: "How to add a model to 🤗 Transformers?"
|
||||
- local: add_new_pipeline
|
||||
title: "How to add a pipeline to 🤗 Transformers?"
|
||||
- local: fast_tokenizers
|
||||
title: "Using tokenizers from 🤗 Tokenizers"
|
||||
- local: performance
|
||||
title: 'Performance and Scalability: How To Fit a Bigger Model and Train It Faster'
|
||||
- local: parallelism
|
||||
title: Model Parallelism
|
||||
- local: testing
|
||||
title: Testing
|
||||
- local: debugging
|
||||
title: Debugging
|
||||
- local: serialization
|
||||
title: Exporting transformers models
|
||||
- local: pr_checks
|
||||
title: Checks on a Pull Request
|
||||
title: Advanced guides
|
||||
title: How-to guides
|
||||
- sections:
|
||||
- local: philosophy
|
||||
title: Philosophy
|
||||
- local: glossary
|
||||
title: Glossary
|
||||
- local: task_summary
|
||||
title: Summary of the tasks
|
||||
- local: model_summary
|
||||
title: Summary of the models
|
||||
- local: tokenizer_summary
|
||||
title: Summary of the tokenizers
|
||||
- local: pad_truncation
|
||||
title: Padding and truncation
|
||||
- local: bertology
|
||||
title: BERTology
|
||||
- local: perplexity
|
||||
title: Perplexity of fixed-length models
|
||||
- local: benchmarks
|
||||
title: Benchmarks
|
||||
title: Research
|
||||
title: Conceptual guides
|
||||
- sections:
|
||||
- sections:
|
||||
- local: main_classes/callback
|
||||
@ -86,6 +118,10 @@
|
||||
title: Logging
|
||||
- local: main_classes/model
|
||||
title: Models
|
||||
- local: main_classes/text_generation
|
||||
title: Text Generation
|
||||
- local: main_classes/onnx
|
||||
title: ONNX
|
||||
- local: main_classes/optimizer_schedules
|
||||
title: Optimization
|
||||
- local: main_classes/output
|
||||
@ -120,17 +156,17 @@
|
||||
title: BERT
|
||||
- local: model_doc/bertweet
|
||||
title: Bertweet
|
||||
- local: model_doc/bertgeneration
|
||||
- local: model_doc/bert-generation
|
||||
title: BertGeneration
|
||||
- local: model_doc/bert_japanese
|
||||
- local: model_doc/bert-japanese
|
||||
title: BertJapanese
|
||||
- local: model_doc/bigbird
|
||||
- local: model_doc/big_bird
|
||||
title: BigBird
|
||||
- local: model_doc/bigbird_pegasus
|
||||
title: BigBirdPegasus
|
||||
- local: model_doc/blenderbot
|
||||
title: Blenderbot
|
||||
- local: model_doc/blenderbot_small
|
||||
- local: model_doc/blenderbot-small
|
||||
title: Blenderbot Small
|
||||
- local: model_doc/bort
|
||||
title: BORT
|
||||
@ -140,6 +176,8 @@
|
||||
title: CamemBERT
|
||||
- local: model_doc/canine
|
||||
title: CANINE
|
||||
- local: model_doc/convnext
|
||||
title: ConvNeXT
|
||||
- local: model_doc/clip
|
||||
title: CLIP
|
||||
- local: model_doc/convbert
|
||||
@ -148,10 +186,14 @@
|
||||
title: CPM
|
||||
- local: model_doc/ctrl
|
||||
title: CTRL
|
||||
- local: model_doc/data2vec
|
||||
title: Data2Vec
|
||||
- local: model_doc/deberta
|
||||
title: DeBERTa
|
||||
- local: model_doc/deberta_v2
|
||||
- local: model_doc/deberta-v2
|
||||
title: DeBERTa-v2
|
||||
- local: model_doc/decision_transformer
|
||||
title: Decision Transformer
|
||||
- local: model_doc/deit
|
||||
title: DeiT
|
||||
- local: model_doc/detr
|
||||
@ -160,11 +202,15 @@
|
||||
title: DialoGPT
|
||||
- local: model_doc/distilbert
|
||||
title: DistilBERT
|
||||
- local: model_doc/dit
|
||||
title: DiT
|
||||
- local: model_doc/dpr
|
||||
title: DPR
|
||||
- local: model_doc/dpt
|
||||
title: DPT
|
||||
- local: model_doc/electra
|
||||
title: ELECTRA
|
||||
- local: model_doc/encoderdecoder
|
||||
- local: model_doc/encoder-decoder
|
||||
title: Encoder Decoder Models
|
||||
- local: model_doc/flaubert
|
||||
title: FlauBERT
|
||||
@ -174,8 +220,10 @@
|
||||
title: FSMT
|
||||
- local: model_doc/funnel
|
||||
title: Funnel Transformer
|
||||
- local: model_doc/glpn
|
||||
title: GLPN
|
||||
- local: model_doc/herbert
|
||||
title: herBERT
|
||||
title: HerBERT
|
||||
- local: model_doc/ibert
|
||||
title: I-BERT
|
||||
- local: model_doc/imagegpt
|
||||
@ -196,23 +244,27 @@
|
||||
title: LXMERT
|
||||
- local: model_doc/marian
|
||||
title: MarianMT
|
||||
- local: model_doc/maskformer
|
||||
title: MaskFormer
|
||||
- local: model_doc/m2m_100
|
||||
title: M2M100
|
||||
- local: model_doc/mbart
|
||||
title: MBart and MBart-50
|
||||
- local: model_doc/megatron_bert
|
||||
- local: model_doc/megatron-bert
|
||||
title: MegatronBERT
|
||||
- local: model_doc/megatron_gpt2
|
||||
title: MegatronGPT2
|
||||
- local: model_doc/mobilebert
|
||||
title: MobileBERT
|
||||
- local: model_doc/mluke
|
||||
title: mLUKE
|
||||
- local: model_doc/mobilebert
|
||||
title: MobileBERT
|
||||
- local: model_doc/mpnet
|
||||
title: MPNet
|
||||
- local: model_doc/mt5
|
||||
title: MT5
|
||||
- local: model_doc/gpt
|
||||
- local: model_doc/nystromformer
|
||||
title: Nyströmformer
|
||||
- local: model_doc/openai-gpt
|
||||
title: OpenAI GPT
|
||||
- local: model_doc/gpt2
|
||||
title: OpenAI GPT2
|
||||
@ -228,16 +280,24 @@
|
||||
title: Pegasus
|
||||
- local: model_doc/phobert
|
||||
title: PhoBERT
|
||||
- local: model_doc/plbart
|
||||
title: PLBart
|
||||
- local: model_doc/poolformer
|
||||
title: PoolFormer
|
||||
- local: model_doc/prophetnet
|
||||
title: ProphetNet
|
||||
- local: model_doc/qdqbert
|
||||
title: QDQBert
|
||||
- local: model_doc/rag
|
||||
title: RAG
|
||||
- local: model_doc/realm
|
||||
title: REALM
|
||||
- local: model_doc/reformer
|
||||
title: Reformer
|
||||
- local: model_doc/rembert
|
||||
title: RemBERT
|
||||
- local: model_doc/resnet
|
||||
title: ResNet
|
||||
- local: model_doc/retribert
|
||||
title: RetriBERT
|
||||
- local: model_doc/roberta
|
||||
@ -248,9 +308,9 @@
|
||||
title: SegFormer
|
||||
- local: model_doc/sew
|
||||
title: SEW
|
||||
- local: model_doc/sew_d
|
||||
- local: model_doc/sew-d
|
||||
title: SEW-D
|
||||
- local: model_doc/speechencoderdecoder
|
||||
- local: model_doc/speech-encoder-decoder
|
||||
title: Speech Encoder Decoder Models
|
||||
- local: model_doc/speech_to_text
|
||||
title: Speech2Text
|
||||
@ -260,40 +320,60 @@
|
||||
title: Splinter
|
||||
- local: model_doc/squeezebert
|
||||
title: SqueezeBERT
|
||||
- local: model_doc/swin
|
||||
title: Swin Transformer
|
||||
- local: model_doc/t5
|
||||
title: T5
|
||||
- local: model_doc/t5v1.1
|
||||
title: T5v1.1
|
||||
- local: model_doc/tapas
|
||||
title: TAPAS
|
||||
- local: model_doc/transformerxl
|
||||
- local: model_doc/transfo-xl
|
||||
title: Transformer XL
|
||||
- local: model_doc/trocr
|
||||
title: TrOCR
|
||||
- local: model_doc/unispeech
|
||||
title: UniSpeech
|
||||
- local: model_doc/unispeech_sat
|
||||
- local: model_doc/unispeech-sat
|
||||
title: UniSpeech-SAT
|
||||
- local: model_doc/visionencoderdecoder
|
||||
- local: model_doc/van
|
||||
title: VAN
|
||||
- local: model_doc/vilt
|
||||
title: ViLT
|
||||
- local: model_doc/vision-encoder-decoder
|
||||
title: Vision Encoder Decoder Models
|
||||
- local: model_doc/vision_text_dual_encoder
|
||||
- local: model_doc/vision-text-dual-encoder
|
||||
title: Vision Text Dual Encoder
|
||||
- local: model_doc/vit
|
||||
title: Vision Transformer (ViT)
|
||||
- local: model_doc/vit_mae
|
||||
title: ViTMAE
|
||||
- local: model_doc/visual_bert
|
||||
title: VisualBERT
|
||||
- local: model_doc/wav2vec2
|
||||
title: Wav2Vec2
|
||||
- local: model_doc/wav2vec2_phoneme
|
||||
title: Wav2Vec2Phoneme
|
||||
- local: model_doc/wavlm
|
||||
title: WavLM
|
||||
- local: model_doc/xglm
|
||||
title: XGLM
|
||||
- local: model_doc/xlm
|
||||
title: XLM
|
||||
- local: model_doc/xlmprophetnet
|
||||
- local: model_doc/xlm-prophetnet
|
||||
title: XLM-ProphetNet
|
||||
- local: model_doc/xlmroberta
|
||||
- local: model_doc/xlm-roberta
|
||||
title: XLM-RoBERTa
|
||||
- local: model_doc/xlm-roberta-xl
|
||||
title: XLM-RoBERTa-XL
|
||||
- local: model_doc/xlnet
|
||||
title: XLNet
|
||||
- local: model_doc/xlsr_wav2vec2
|
||||
title: XLSR-Wav2Vec2
|
||||
- local: model_doc/xls_r
|
||||
title: XLS-R
|
||||
- local: model_doc/yoso
|
||||
title: YOSO
|
||||
title: Models
|
||||
- sections:
|
||||
- local: internal/modeling_utils
|
||||
docs/source/en/accelerate.mdx (new file, 132 lines)
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Distributed training with 🤗 Accelerate
|
||||
|
||||
As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. At Hugging Face, we created the [🤗 Accelerate](https://huggingface.co/docs/accelerate/index.html) library to help users easily train a 🤗 Transformers model on any type of distributed setup, whether it is multiple GPUs on one machine or multiple GPUs across several machines. In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment.
|
||||
|
||||
## Setup
|
||||
|
||||
Get started by installing 🤗 Accelerate:
|
||||
|
||||
```bash
|
||||
pip install accelerate
|
||||
```
|
||||
|
||||
Then import and create an [`Accelerator`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator) object. `Accelerator` will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device.
|
||||
|
||||
```py
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
```
|
||||
|
||||
## Prepare to accelerate
|
||||
|
||||
The next step is to pass all the relevant training objects to the [`prepare`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare) method. This includes your training and evaluation DataLoaders, a model and an optimizer:
|
||||
|
||||
```py
|
||||
>>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
|
||||
... train_dataloader, eval_dataloader, model, optimizer
|
||||
... )
|
||||
```
|
||||
|
||||
## Backward
|
||||
|
||||
The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`backward`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.backward) method:
|
||||
|
||||
```py
|
||||
>>> for epoch in range(num_epochs):
|
||||
... for batch in train_dataloader:
|
||||
... outputs = model(**batch)
|
||||
... loss = outputs.loss
|
||||
... accelerator.backward(loss)
|
||||
|
||||
... optimizer.step()
|
||||
... lr_scheduler.step()
|
||||
... optimizer.zero_grad()
|
||||
... progress_bar.update(1)
|
||||
```
|
||||
|
||||
As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!
|
||||
|
||||
```diff
|
||||
+ from accelerate import Accelerator
|
||||
from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler
|
||||
|
||||
+ accelerator = Accelerator()
|
||||
|
||||
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
|
||||
optimizer = AdamW(model.parameters(), lr=3e-5)
|
||||
|
||||
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
||||
- model.to(device)
|
||||
|
||||
+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
|
||||
+ train_dataloader, eval_dataloader, model, optimizer
|
||||
+ )
|
||||
|
||||
num_epochs = 3
|
||||
num_training_steps = num_epochs * len(train_dataloader)
|
||||
lr_scheduler = get_scheduler(
|
||||
"linear",
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=0,
|
||||
num_training_steps=num_training_steps
|
||||
)
|
||||
|
||||
progress_bar = tqdm(range(num_training_steps))
|
||||
|
||||
model.train()
|
||||
for epoch in range(num_epochs):
|
||||
for batch in train_dataloader:
|
||||
- batch = {k: v.to(device) for k, v in batch.items()}
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
- loss.backward()
|
||||
+ accelerator.backward(loss)
|
||||
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
progress_bar.update(1)
|
||||
```
|
||||
|
||||
## Train
|
||||
|
||||
Once you've added the relevant lines of code, launch your training in a script or a notebook like Colaboratory.
|
||||
|
||||
### Train with a script
|
||||
|
||||
If you are running your training from a script, run the following command to create and save a configuration file:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
Then launch your training with:
|
||||
|
||||
```bash
|
||||
accelerate launch train.py
|
||||
```
|
||||
|
||||
### Train with a notebook
|
||||
|
||||
🤗 Accelerate can also run in a notebook if you're planning on using Colaboratory's TPUs. Wrap all the code responsible for training in a function, and pass it to `notebook_launcher`:
|
||||
|
||||
```py
|
||||
>>> from accelerate import notebook_launcher
|
||||
|
||||
>>> notebook_launcher(training_function)
|
||||
```
|
||||
|
||||
For more information about 🤗 Accelerate and its rich features, refer to the [documentation](https://huggingface.co/docs/accelerate/index.html).
|
||||
File diff suppressed because it is too large
docs/source/en/add_new_pipeline.mdx (new file, 140 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
-->
|
||||
|
||||
# How to add a pipeline to 🤗 Transformers?
|
||||
|
||||
First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes,
|
||||
dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible
|
||||
as it makes compatibility easier (even through other languages via JSON). Those will be the `inputs` of the
|
||||
pipeline (`preprocess`).
|
||||
|
||||
Then define the `outputs`. Same policy as the `inputs`: the simpler, the better. Those will be the outputs of
the `postprocess` method.
|
||||
|
||||
Start by inheriting the base class `Pipeline` and implementing the 4 required methods: `preprocess`,
`_forward`, `postprocess` and `_sanitize_parameters`.
|
||||
|
||||
|
||||
```python
|
||||
from transformers import Pipeline
|
||||
|
||||
|
||||
class MyPipeline(Pipeline):
|
||||
def _sanitize_parameters(self, **kwargs):
|
||||
preprocess_kwargs = {}
|
||||
if "maybe_arg" in kwargs:
|
||||
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
|
||||
return preprocess_kwargs, {}, {}
|
||||
|
||||
def preprocess(self, inputs, maybe_arg=2):
|
||||
model_input = Tensor(inputs["input_ids"])
|
||||
return {"model_input": model_input}
|
||||
|
||||
def _forward(self, model_inputs):
|
||||
# model_inputs == {"model_input": model_input}
|
||||
outputs = self.model(**model_inputs)
|
||||
# Maybe {"logits": Tensor(...)}
|
||||
return outputs
|
||||
|
||||
def postprocess(self, model_outputs):
|
||||
best_class = model_outputs["logits"].softmax(-1)
|
||||
return best_class
|
||||
```
|
||||
|
||||
This breakdown is structured to provide relatively seamless CPU/GPU support, while allowing pre/postprocessing to be
performed on the CPU in different threads.
|
||||
|
||||
`preprocess` will take the originally defined inputs, and turn them into something feedable to the model. It might
|
||||
contain more information and is usually a `Dict`.
|
||||
|
||||
`_forward` is an implementation detail and is not meant to be called directly. `forward` is the preferred
method to call as it contains safeguards to make sure everything is working on the expected device. Anything
linked to a real model belongs in the `_forward` method; anything else goes in preprocess/postprocess.
|
||||
|
||||
The `postprocess` method will take the output of `_forward` and turn it into the final output that was decided
earlier.
|
||||
|
||||
`_sanitize_parameters` exists to allow users to pass any parameters whenever they wish, be it at initialization
|
||||
time `pipeline(...., maybe_arg=4)` or at call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.
|
||||
|
||||
`_sanitize_parameters` returns the 3 dicts of kwargs that will be passed directly to `preprocess`,
`_forward` and `postprocess`. Don't fill anything if the caller didn't pass any extra parameter; that
allows you to keep the default arguments in the function definition, which is always more "natural".
|
||||
|
||||
A classic example would be a `top_k` argument in the post processing in classification tasks.
|
||||
|
||||
```python
|
||||
>>> pipe = pipeline("my-new-task")
|
||||
>>> pipe("This is a test")
|
||||
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
|
||||
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]
|
||||
|
||||
>>> pipe("This is a test", top_k=2)
|
||||
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
|
||||
```
|
||||
|
||||
In order to achieve that, we'll update our `postprocess` method with a default `top_k` parameter of `5`, and edit
`_sanitize_parameters` to allow this new parameter.
|
||||
|
||||
|
||||
```python
|
||||
def postprocess(self, model_outputs, top_k=5):
|
||||
best_class = model_outputs["logits"].softmax(-1)
|
||||
# Add logic to handle top_k
|
||||
return best_class
|
||||
|
||||
|
||||
def _sanitize_parameters(self, **kwargs):
|
||||
preprocess_kwargs = {}
|
||||
if "maybe_arg" in kwargs:
|
||||
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
|
||||
|
||||
postprocess_kwargs = {}
|
||||
if "top_k" in kwargs:
|
||||
preprocess_kwargs["top_k"] = kwargs["top_k"]
|
||||
return preprocess_kwargs, {}, postprocess_kwargs
|
||||
```
|
||||
|
||||
Try to keep the inputs/outputs very simple and ideally JSON-serializable, as it makes the pipeline very easy to use
without requiring users to understand new kinds of objects. It's also relatively common to support many different types
of arguments for ease of use (audio files, for example, can be filenames, URLs or pure bytes).
|
||||
|
||||
|
||||
|
||||
## Adding it to the list of supported tasks
|
||||
|
||||
Go to `src/transformers/pipelines/__init__.py` and fill in `SUPPORTED_TASKS` with your newly created pipeline.
|
||||
If possible it should provide a default model.
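As a rough illustration only, an entry might look like the sketch below; copy the exact fields from the existing entries in that file, since `MyPipeline`, the auto class used, and the default checkpoint name here are hypothetical:

```python
"my-new-task": {
    "impl": MyPipeline,
    "tf": (),  # TensorFlow auto classes supporting this task, if any
    "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
    "default": {"model": {"pt": "username/my-new-task-default-model"}},  # hypothetical default checkpoint
    "type": "text",
},
```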
|
||||
|
||||
## Adding tests
|
||||
|
||||
Create a new file `tests/test_pipelines_MY_PIPELINE.py` with examples based on the other pipeline tests.
|
||||
|
||||
The `run_pipeline_test` function will be very generic and run on small random models on every possible
|
||||
architecture as defined by `model_mapping` and `tf_model_mapping`.
|
||||
|
||||
This is very important to test future compatibility, meaning if someone adds a new model for
`XXXForQuestionAnswering` then the pipeline test will attempt to run on it. Because the models are random it's
impossible to check for actual values; that's why there is a helper `ANY` that will simply attempt to match the
type of the pipeline output.
|
||||
|
||||
You also *need* to implement 2 (ideally 4) tests.
|
||||
|
||||
- `test_small_model_pt` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
  and test the pipeline outputs. The results should be the same as `test_small_model_tf`.
- `test_small_model_tf` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
  and test the pipeline outputs. The results should be the same as `test_small_model_pt`.
- `test_large_model_pt` (`optional`): Tests the pipeline on a real model where the results are supposed to
  make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
  sure there is no drift in future releases.
- `test_large_model_tf` (`optional`): Tests the pipeline on a real model where the results are supposed to
  make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
  sure there is no drift in future releases. A minimal PyTorch test skeleton is sketched below.
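
As a rough, PyTorch-only illustration (not the exact layout of the existing test files), such a test could look like
the sketch below. The tiny checkpoint id and the import path of the `ANY` helper are assumptions used for illustration
only:

```python
import unittest

from transformers import pipeline
from transformers.testing_utils import require_torch

# `ANY` is the small helper used by the pipeline tests to match only the *type* of a field;
# its import path here is an assumption for illustration.
from .test_pipelines_common import ANY


@require_torch
class MyNewTaskPipelineTests(unittest.TestCase):
    def test_small_model_pt(self):
        # placeholder tiny checkpoint, replace with a real tiny model for your task
        pipe = pipeline("my-new-task", model="hf-internal-testing/tiny-random-distilbert")
        outputs = pipe("This is a test", top_k=2)
        # values are meaningless on a tiny random model, so only the structure and types are checked
        self.assertEqual(
            outputs,
            [
                {"label": ANY(str), "score": ANY(float)},
                {"label": ANY(str), "score": ANY(float)},
            ],
        )
```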
|
||||
docs/source/en/autoclass_tutorial.mdx
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Load pretrained instances with an AutoClass
|
||||
|
||||
With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of the 🤗 Transformers core philosophy to make the library easy, simple and flexible to use, an `AutoClass` automatically infers and loads the correct architecture from a given checkpoint. The `from_pretrained` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different.
|
||||
|
||||
<Tip>
|
||||
|
||||
Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, [BERT](https://huggingface.co/bert-base-uncased) is an architecture, while `bert-base-uncased` is a checkpoint. Model is a general term that can mean either architecture or checkpoint.
|
||||
|
||||
</Tip>
|
||||
|
||||
In this tutorial, learn to:
|
||||
|
||||
* Load a pretrained tokenizer.
|
||||
* Load a pretrained feature extractor.
|
||||
* Load a pretrained processor.
|
||||
* Load a pretrained model.
|
||||
|
||||
## AutoTokenizer
|
||||
|
||||
Nearly every NLP task begins with a tokenizer. A tokenizer converts your input into a format that can be processed by the model.
|
||||
|
||||
Load a tokenizer with [`AutoTokenizer.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
||||
```
|
||||
|
||||
Then tokenize your input as shown below:
|
||||
|
||||
```py
|
||||
>>> sequence = "In a hole in the ground there lived a hobbit."
|
||||
>>> print(tokenizer(sequence))
|
||||
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102],
|
||||
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
|
||||
```
|
||||
|
||||
## AutoFeatureExtractor
|
||||
|
||||
For audio and vision tasks, a feature extractor processes the audio signal or image into the correct input format.
|
||||
|
||||
Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoFeatureExtractor
|
||||
|
||||
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
|
||||
... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
|
||||
... )
|
||||
```
|
||||
|
||||
## AutoProcessor
|
||||
|
||||
Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires a feature extractor to handle images and a tokenizer to handle text; a processor combines both of them.
|
||||
|
||||
Load a processor with [`AutoProcessor.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoProcessor
|
||||
|
||||
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
|
||||
```
|
||||
|
||||
## AutoModel
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
Finally, the `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSequenceClassification
|
||||
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse the same checkpoint to load an architecture for a different task:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForTokenClassification
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning.
|
||||
</pt>
|
||||
<tf>
|
||||
Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSequenceClassification
|
||||
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse the same checkpoint to load an architecture for a different task:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForTokenClassification
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Generally, we recommend using the `AutoTokenizer` class and the `TFAutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning.
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
# Benchmarks
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Hugging Face's Benchmarking tools are deprecated and it is advised to use external Benchmarking libraries to measure the speed
|
||||
and memory complexity of Transformer models.
|
||||
|
||||
</Tip>
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Let's take a look at how 🤗 Transformers models can be benchmarked, best practices, and already available benchmarks.
|
||||
|
||||
A notebook explaining in more detail how to benchmark 🤗 Transformers models can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb).
|
||||
|
||||
## How to benchmark 🤗 Transformers models
|
||||
|
||||
The classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] allow you to flexibly benchmark 🤗 Transformers models. The benchmark classes allow us to measure the _peak memory usage_ and _required time_ for both _inference_ and _training_.
|
||||
|
||||
<Tip>
|
||||
|
||||
The benchmark classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] expect an object of type [`PyTorchBenchmarkArguments`] and
|
||||
[`TensorFlowBenchmarkArguments`], respectively, for instantiation. [`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`] are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type _bert-base-uncased_ can be benchmarked.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
Here, three arguments are given to the benchmark argument data classes, namely `models`, `batch_sizes`, and
|
||||
`sequence_lengths`. The argument `models` is required and expects a `list` of model identifiers from the
|
||||
and `src/transformers/benchmark/benchmark_args_tf.py` (for Tensorflow). Alternatively, running the following shell
commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow
respectively.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
python examples/pytorch/benchmarking/run_benchmark.py --help
|
||||
|
||||
```
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```bash
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
```
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
By default, the _time_ and the _required memory_ for _inference_ are benchmarked. In the example output above the first
|
||||
two sections show the result corresponding to _inference time_ and _inference memory_. In addition, all relevant
|
||||
Instead of benchmarking pre-trained models via their model identifier, _e.g._ `bert-base-uncased`, the user can
alternatively benchmark an arbitrary configuration of any available model class. In this case, a `list` of
configurations must be inserted with the benchmark args as follows.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
Again, _inference time_ and _required memory_ for _inference_ are measured, but this time for customized configurations
|
||||
of the `BertModel` class. This feature can especially be helpful when deciding for which configuration the model
|
||||
|
||||
With the new _benchmark_ tools, it is easier than ever to share your benchmark results with the community
|
||||
|
||||
- [PyTorch Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md).
|
||||
- [TensorFlow Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md).
|
||||
docs/source/en/bertology.mdx
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# BERTology
|
||||
|
||||
There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT
(that some call "BERTology"). Some good examples of this field are:
|
||||
|
||||
|
||||
- BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick:
|
||||
https://arxiv.org/abs/1905.05950
|
||||
- Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
|
||||
- What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D.
|
||||
Manning: https://arxiv.org/abs/1906.04341
|
||||
|
||||
In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to
|
||||
help people access the inner representations, mainly adapted from the great work of Paul Michel
|
||||
(https://arxiv.org/abs/1905.10650):
|
||||
|
||||
|
||||
- accessing all the hidden-states of BERT/GPT/GPT-2,
|
||||
- accessing all the attention weights for each head of BERT/GPT/GPT-2,
|
||||
- retrieving head output values and gradients to be able to compute head importance scores and prune heads as explained
  in https://arxiv.org/abs/1905.10650 (a short usage sketch follows below).
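
As a short, hedged sketch of the first two points (using the standard `bert-base-uncased` checkpoint; the exact shapes
depend on the configuration):

```python
import torch
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased", output_hidden_states=True, output_attentions=True)

inputs = tokenizer("Hello, BERTology!", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(len(outputs.hidden_states))  # embedding output + one hidden state per layer
print(len(outputs.attentions))  # one attention tensor per layer

# heads can also be pruned, e.g. removing heads 0 and 1 of layer 0:
model.prune_heads({0: [0, 1]})
```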
|
||||
|
||||
To help you understand and use these features, we have added a specific example script: [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) which extracts information and prunes a model pre-trained on
GLUE.
|
||||
This page regroups resources around 🤗 Transformers developed by the community:
| [Speech Emotion Classification with Wav2Vec2](https://github.com/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) | How to leverage a pretrained Wav2Vec2 model for Emotion Classification on the MEGA dataset | [Mehrdad Farahani](https://github.com/m3hrdadfi) | [](https://colab.research.google.com/github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) |
|
||||
| [Detect objects in an image with DETR](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) | How to use a trained *DetrForObjectDetection* model to detect objects in an image and visualize attention | [Niels Rogge](https://github.com/NielsRogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) |
|
||||
| [Fine-tune DETR on a custom object detection dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) | How to fine-tune *DetrForObjectDetection* on a custom object detection dataset | [Niels Rogge](https://github.com/NielsRogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) |
|
||||
| [Finetune T5 for Named Entity Recognition](https://github.com/ToluClassics/Notebooks/blob/main/T5_Ner_Finetuning.ipynb) | How to fine-tune *T5* on a Named Entity Recognition Task | [Ogundepo Odunayo](https://github.com/ToluClassics) | [](https://colab.research.google.com/drive/1obr78FY_cBmWY5ODViCmzdY6O1KB65Vc?usp=sharing) |
|
||||
docs/source/en/contributing.md (symbolic link to ../../../CONTRIBUTING.md)
|
||||
docs/source/en/converting_tensorflow_models.mdx
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Converting Tensorflow Checkpoints
|
||||
|
||||
A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models
|
||||
that can be loaded using the `from_pretrained` methods of the library.
|
||||
|
||||
<Tip>
|
||||
|
||||
Since version 2.3.0 the conversion script is part of the transformers CLI (**transformers-cli**), available in any
transformers >= 2.3.0 installation.
|
||||
|
||||
The documentation below reflects the **transformers-cli convert** command format.
|
||||
|
||||
</Tip>
|
||||
|
||||
## BERT
|
||||
|
||||
You can convert any TensorFlow checkpoint for BERT (in particular [the pre-trained models released by Google](https://github.com/google-research/bert#pre-trained-models)) in a PyTorch save file by using the
|
||||
[convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py) script.
|
||||
|
||||
This CLI takes as input a TensorFlow checkpoint (three files starting with `bert_model.ckpt`) and the associated
|
||||
configuration file (`bert_config.json`), and creates a PyTorch model for this configuration, loads the weights from
|
||||
the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that can
|
||||
be imported using `from_pretrained()` (see example in [quicktour](quicktour) , [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py) ).
|
||||
|
||||
You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
|
||||
checkpoint (the three files starting with `bert_model.ckpt`) but be sure to keep the configuration file
(`bert_config.json`) and the vocabulary file (`vocab.txt`) as these are needed for the PyTorch model too.
|
||||
|
||||
To run this specific conversion script you will need to have TensorFlow and PyTorch installed (`pip install tensorflow`). The rest of the repository only requires PyTorch.
|
||||
|
||||
Here is an example of the conversion process for a pre-trained `BERT-Base Uncased` model:
|
||||
|
||||
```bash
|
||||
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
|
||||
|
||||
transformers-cli convert --model_type bert \
|
||||
--tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
|
||||
--config $BERT_BASE_DIR/bert_config.json \
|
||||
--pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
|
||||
```
|
||||
|
||||
You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/bert#pre-trained-models).
|
||||
|
||||
## ALBERT
|
||||
|
||||
Convert TensorFlow model checkpoints of ALBERT to PyTorch using the
|
||||
[convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py) script.
|
||||
|
||||
The CLI takes as input a TensorFlow checkpoint (three files starting with `model.ckpt-best`) and the accompanying
|
||||
configuration file (`albert_config.json`), then creates and saves a PyTorch model. To run this conversion you will
|
||||
need to have TensorFlow and PyTorch installed.
|
||||
|
||||
Here is an example of the conversion process for the pre-trained `ALBERT Base` model:
|
||||
|
||||
```bash
|
||||
export ALBERT_BASE_DIR=/path/to/albert/albert_base
|
||||
|
||||
transformers-cli convert --model_type albert \
|
||||
--tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
|
||||
--config $ALBERT_BASE_DIR/albert_config.json \
|
||||
--pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
|
||||
```
|
||||
|
||||
You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/albert#pre-trained-models).
|
||||
|
||||
## OpenAI GPT
|
||||
|
||||
Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint
is saved in the same format as the OpenAI pretrained model (see [here](https://github.com/openai/finetune-transformer-lm)):
|
||||
|
||||
```bash
|
||||
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights
|
||||
|
||||
transformers-cli convert --model_type gpt \
|
||||
--tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config OPENAI_GPT_CONFIG] \
|
||||
[--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]
|
||||
```
|
||||
|
||||
## OpenAI GPT-2
|
||||
|
||||
Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see [here](https://github.com/openai/gpt-2))
|
||||
|
||||
```bash
|
||||
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights
|
||||
|
||||
transformers-cli convert --model_type gpt2 \
|
||||
--tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config OPENAI_GPT2_CONFIG] \
|
||||
[--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
|
||||
```
|
||||
|
||||
## Transformer-XL
|
||||
|
||||
Here is an example of the conversion process for a pre-trained Transformer-XL model (see [here](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models))
|
||||
|
||||
```bash
|
||||
export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint
|
||||
|
||||
transformers-cli convert --model_type transfo_xl \
|
||||
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config TRANSFO_XL_CONFIG] \
|
||||
[--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]
|
||||
```
|
||||
|
||||
## XLNet
|
||||
|
||||
Here is an example of the conversion process for a pre-trained XLNet model:
|
||||
|
||||
```bash
|
||||
export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
|
||||
export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config
|
||||
|
||||
transformers-cli convert --model_type xlnet \
|
||||
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
|
||||
--config $TRANSFO_XL_CONFIG_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--finetuning_task_name XLNET_FINETUNED_TASK]
|
||||
```
|
||||
|
||||
## XLM
|
||||
|
||||
Here is an example of the conversion process for a pre-trained XLM model:
|
||||
|
||||
```bash
|
||||
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint
|
||||
|
||||
transformers-cli convert --model_type xlm \
|
||||
--tf_checkpoint $XLM_CHECKPOINT_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
[--config XLM_CONFIG] \
[--finetuning_task_name XLM_FINETUNED_TASK]
|
||||
```
|
||||
|
||||
## T5
|
||||
|
||||
Here is an example of the conversion process for a pre-trained T5 model:
|
||||
|
||||
```bash
|
||||
export T5=/path/to/t5/uncased_L-12_H-768_A-12
|
||||
|
||||
transformers-cli convert --model_type t5 \
|
||||
--tf_checkpoint $T5/t5_model.ckpt \
|
||||
--config $T5/t5_config.json \
|
||||
--pytorch_dump_output $T5/pytorch_model.bin
|
||||
```
|
||||
docs/source/en/create_a_model.mdx
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Create a custom architecture
|
||||
|
||||
An [`AutoClass`](model_doc/auto) automatically infers the model architecture and downloads pretrained configuration and weights. Generally, we recommend using an `AutoClass` to produce checkpoint-agnostic code. But users who want more control over specific model parameters can create a custom 🤗 Transformers model from just a few base classes. This could be particularly useful for anyone who is interested in studying, training or experimenting with a 🤗 Transformers model. In this guide, dive deeper into creating a custom model without an `AutoClass`. Learn how to:
|
||||
|
||||
- Load and customize a model configuration.
|
||||
- Create a model architecture.
|
||||
- Create a slow and fast tokenizer for text.
|
||||
- Create a feature extractor for audio or image tasks.
|
||||
- Create a processor for multimodal tasks.
|
||||
|
||||
## Configuration
|
||||
|
||||
A [configuration](main_classes/configuration) refers to a model's specific attributes. Each model configuration has different attributes; for instance, all NLP models have the `hidden_size`, `num_attention_heads`, `num_hidden_layers` and `vocab_size` attributes in common. These attributes specify the number of attention heads or hidden layers to construct a model with.
|
||||
|
||||
Get a closer look at [DistilBERT](model_doc/distilbert) by accessing [`DistilBertConfig`] to inspect its attributes:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertConfig
|
||||
|
||||
>>> config = DistilBertConfig()
|
||||
>>> print(config)
|
||||
DistilBertConfig {
|
||||
"activation": "gelu",
|
||||
"attention_dropout": 0.1,
|
||||
"dim": 768,
|
||||
"dropout": 0.1,
|
||||
"hidden_dim": 3072,
|
||||
"initializer_range": 0.02,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "distilbert",
|
||||
"n_heads": 12,
|
||||
"n_layers": 6,
|
||||
"pad_token_id": 0,
|
||||
"qa_dropout": 0.1,
|
||||
"seq_classif_dropout": 0.2,
|
||||
"sinusoidal_pos_embds": false,
|
||||
"transformers_version": "4.16.2",
|
||||
"vocab_size": 30522
|
||||
}
|
||||
```
|
||||
|
||||
[`DistilBertConfig`] displays all the default attributes used to build a base [`DistilBertModel`]. All attributes are customizable, creating space for experimentation. For example, you can customize a default model to:
|
||||
|
||||
- Try a different activation function with the `activation` parameter.
|
||||
- Use a higher dropout ratio for the attention probabilities with the `attention_dropout` parameter.
|
||||
|
||||
```py
|
||||
>>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4)
|
||||
>>> print(my_config)
|
||||
DistilBertConfig {
|
||||
"activation": "relu",
|
||||
"attention_dropout": 0.4,
|
||||
"dim": 768,
|
||||
"dropout": 0.1,
|
||||
"hidden_dim": 3072,
|
||||
"initializer_range": 0.02,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "distilbert",
|
||||
"n_heads": 12,
|
||||
"n_layers": 6,
|
||||
"pad_token_id": 0,
|
||||
"qa_dropout": 0.1,
|
||||
"seq_classif_dropout": 0.2,
|
||||
"sinusoidal_pos_embds": false,
|
||||
"transformers_version": "4.16.2",
|
||||
"vocab_size": 30522
|
||||
}
|
||||
```
|
||||
|
||||
Pretrained model attributes can be modified in the [`~PretrainedConfig.from_pretrained`] function:
|
||||
|
||||
```py
|
||||
>>> my_config = DistilBertConfig.from_pretrained("distilbert-base-uncased", activation="relu", attention_dropout=0.4)
|
||||
```
|
||||
|
||||
Once you are satisfied with your model configuration, you can save it with [`~PretrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory:
|
||||
|
||||
```py
|
||||
>>> my_config.save_pretrained(save_directory="./your_model_save_path")
|
||||
```
|
||||
|
||||
To reuse the configuration file, load it with [`~PretrainedConfig.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json")
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
You can also save your configuration file as a dictionary or even just the difference between your custom configuration attributes and the default configuration attributes! See the [configuration](main_classes/configuration) documentation for more details.
|
||||
|
||||
</Tip>
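
As a minimal sketch of what this looks like in practice (reusing `my_config` from the snippets above):

```py
>>> config_dict = my_config.to_dict()  # full dictionary of attributes
>>> diff_dict = my_config.to_diff_dict()  # only the attributes that differ from the defaults
```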
|
||||
|
||||
## Model
|
||||
|
||||
The next step is to create a [model](main_classes/models). The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like `num_hidden_layers` from the configuration are used to define the architecture. Every model shares the base class [`PreTrainedModel`] and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) or [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. This means models are compatible with each of their respective framework's usage.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
Load your custom configuration attributes into the model:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertModel
|
||||
|
||||
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json")
|
||||
>>> model = DistilBertModel(my_config)
|
||||
```
|
||||
|
||||
This creates a model with random values instead of pretrained weights. You won't be able to use this model for anything useful yet until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training.
|
||||
|
||||
Create a pretrained model with [`~PreTrainedModel.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> model = DistilBertModel.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by 🤗 Transformers. However, you can still replace - some or all of - the default model configuration attributes with your own if you'd like:
|
||||
|
||||
```py
|
||||
>>> model = DistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
Load your custom configuration attributes into the model:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFDistilBertModel
|
||||
|
||||
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json")
|
||||
>>> tf_model = TFDistilBertModel(my_config)
|
||||
```
|
||||
|
||||
This creates a model with random values instead of pretrained weights. You won't be able to use this model for anything useful yet until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training.
|
||||
|
||||
Create a pretrained model with [`~TFPreTrainedModel.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by 🤗 Transformers. However, you can still replace - some or all of - the default model configuration attributes with your own if you'd like:
|
||||
|
||||
```py
|
||||
>>> tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
### Model heads
|
||||
|
||||
At this point, you have a base DistilBERT model which outputs the *hidden states*. The hidden states are passed as inputs to a model head to produce the final output. 🤗 Transformers provides a different model head for each task as long as a model supports the task (i.e., you can't use DistilBERT for a sequence-to-sequence task like translation).
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
For example, [`DistilBertForSequenceClassification`] is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs.
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertForSequenceClassification
|
||||
|
||||
>>> model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the [`DistilBertForQuestionAnswering`] model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output.
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertForQuestionAnswering
|
||||
|
||||
>>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
For example, [`TFDistilBertForSequenceClassification`] is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs.
|
||||
|
||||
```py
|
||||
>>> from transformers import TFDistilBertForSequenceClassification
|
||||
|
||||
>>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the [`TFDistilBertForQuestionAnswering`] model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output.
|
||||
|
||||
```py
|
||||
>>> from transformers import TFDistilBertForQuestionAnswering
|
||||
|
||||
>>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## Tokenizer
|
||||
|
||||
The last base class you need before using a model for textual data is a [tokenizer](main_classes/tokenizer) to convert raw text to tensors. There are two types of tokenizers you can use with 🤗 Transformers:
|
||||
|
||||
- [`PreTrainedTokenizer`]: a Python implementation of a tokenizer.
|
||||
- [`PreTrainedTokenizerFast`]: a tokenizer from our Rust-based [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) library. This tokenizer type is significantly faster - especially during batch tokenization - due to its Rust implementation. The fast tokenizer also offers additional methods like *offset mapping*, which maps tokens to their original words or characters (a short example is shown further below).
|
||||
|
||||
Both tokenizers support common methods such as encoding and decoding, adding new tokens, and managing special tokens.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Not every model supports a fast tokenizer. Take a look at this [table](index#supported-frameworks) to check if a model has fast tokenizer support.
|
||||
|
||||
</Tip>
|
||||
|
||||
If you trained your own tokenizer, you can create one from your *vocabulary* file:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertTokenizer
|
||||
|
||||
>>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left")
|
||||
```
|
||||
|
||||
It is important to remember the vocabulary from a custom tokenizer will be different from the vocabulary generated by a pretrained model's tokenizer. You need to use a pretrained model's vocabulary if you are using a pretrained model, otherwise the inputs won't make sense. Create a tokenizer with a pretrained model's vocabulary with the [`DistilBertTokenizer`] class:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertTokenizer
|
||||
|
||||
>>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Create a fast tokenizer with the [`DistilBertTokenizerFast`] class:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertTokenizerFast
|
||||
|
||||
>>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
|
||||
```
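
As a small sketch of the *offset mapping* feature mentioned above (the exact token ids and offsets depend on the
checkpoint and input):

```py
>>> encoding = fast_tokenizer("Hello world!", return_offsets_mapping=True)
>>> encoding["offset_mapping"]
[(0, 0), (0, 5), (6, 11), (11, 12), (0, 0)]
```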
|
||||
|
||||
<Tip>
|
||||
|
||||
By default, [`AutoTokenizer`] will try to load a fast tokenizer. You can disable this behavior by setting `use_fast=False` in `from_pretrained`.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Feature Extractor
|
||||
|
||||
A feature extractor processes audio or image inputs. It inherits from the base [`~feature_extraction_utils.FeatureExtractionMixin`] class, and may also inherit from the [`ImageFeatureExtractionMixin`] class for processing image features or the [`SequenceFeatureExtractor`] class for processing audio inputs.
|
||||
|
||||
Depending on whether you are working on an audio or vision task, create a feature extractor associated with the model you're using. For example, create a default [`ViTFeatureExtractor`] if you are using [ViT](model_doc/vit) for image classification:
|
||||
|
||||
```py
|
||||
>>> from transformers import ViTFeatureExtractor
|
||||
|
||||
>>> vit_extractor = ViTFeatureExtractor()
|
||||
>>> print(vit_extractor)
|
||||
ViTFeatureExtractor {
|
||||
"do_normalize": true,
|
||||
"do_resize": true,
|
||||
"feature_extractor_type": "ViTFeatureExtractor",
|
||||
"image_mean": [
|
||||
0.5,
|
||||
0.5,
|
||||
0.5
|
||||
],
|
||||
"image_std": [
|
||||
0.5,
|
||||
0.5,
|
||||
0.5
|
||||
],
|
||||
"resample": 2,
|
||||
"size": 224
|
||||
}
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default feature extractor parameters.
|
||||
|
||||
</Tip>
|
||||
|
||||
Modify any of the [`ViTFeatureExtractor`] parameters to create your custom feature extractor:
|
||||
|
||||
```py
|
||||
>>> from transformers import ViTFeatureExtractor
|
||||
|
||||
>>> my_vit_extractor = ViTFeatureExtractor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3])
|
||||
>>> print(my_vit_extractor)
|
||||
ViTFeatureExtractor {
|
||||
"do_normalize": false,
|
||||
"do_resize": true,
|
||||
"feature_extractor_type": "ViTFeatureExtractor",
|
||||
"image_mean": [
|
||||
0.3,
|
||||
0.3,
|
||||
0.3
|
||||
],
|
||||
"image_std": [
|
||||
0.5,
|
||||
0.5,
|
||||
0.5
|
||||
],
|
||||
"resample": "PIL.Image.BOX",
|
||||
"size": 224
|
||||
}
|
||||
```
|
||||
|
||||
For audio inputs, you can create a [`Wav2Vec2FeatureExtractor`] and customize the parameters in a similar way:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2FeatureExtractor
|
||||
|
||||
>>> w2v2_extractor = Wav2Vec2FeatureExtractor()
|
||||
>>> print(w2v2_extractor)
|
||||
Wav2Vec2FeatureExtractor {
|
||||
"do_normalize": true,
|
||||
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
|
||||
"feature_size": 1,
|
||||
"padding_side": "right",
|
||||
"padding_value": 0.0,
|
||||
"return_attention_mask": false,
|
||||
"sampling_rate": 16000
|
||||
}
|
||||
```
|
||||
|
||||
## Processor
|
||||
|
||||
For models that support multimodal tasks, 🤗 Transformers offers a processor class that conveniently wraps a feature extractor and tokenizer into a single object. For example, let's use the [`Wav2Vec2Processor`] for an automatic speech recognition task (ASR). ASR transcribes audio to text, so you will need a feature extractor and a tokenizer.
|
||||
|
||||
Create a feature extractor to handle the audio inputs:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2FeatureExtractor
|
||||
|
||||
>>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True)
|
||||
```
|
||||
|
||||
Create a tokenizer to handle the text inputs:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2CTCTokenizer
|
||||
|
||||
>>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt")
|
||||
```
|
||||
|
||||
Combine the feature extractor and tokenizer in [`Wav2Vec2Processor`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2Processor
|
||||
|
||||
>>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
|
||||
```
|
||||
|
||||
With two basic classes - configuration and model - and an additional preprocessing class (tokenizer, feature extractor, or processor), you can create any of the models supported by 🤗 Transformers. Each of these base classes is configurable, allowing you to use the specific attributes you want. You can easily set up a model for training or modify an existing pretrained model to fine-tune.
|
||||
docs/source/en/custom_models.mdx
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Sharing custom models
|
||||
|
||||
The 🤗 Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder
|
||||
of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.
|
||||
|
||||
If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you
|
||||
how to write a custom model and its configuration so it can be used inside Transformers, and how you can share it
|
||||
with the community (with the code it relies on) so that anyone can use it, even if it's not present in the 🤗
|
||||
Transformers library.
|
||||
|
||||
We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the
|
||||
[timm library](https://github.com/rwightman/pytorch-image-models/tree/master/timm) into a [`PreTrainedModel`].
|
||||
|
||||
## Writing a custom configuration
|
||||
|
||||
Before we dive into the model, let's first write its configuration. The configuration of a model is an object that
|
||||
will contain all the necessary information to build the model. As we will see in the next section, the model can only
|
||||
take a `config` to be initialized, so we really need that object to be as complete as possible.
|
||||
|
||||
In our example, we will take a couple of arguments of the ResNet class that we might want to tweak. Different
|
||||
configurations will then give us the different types of ResNets that are possible. We then just store those arguments,
|
||||
after checking the validity of a few of them.
|
||||
|
||||
```python
|
||||
from transformers import PretrainedConfig
|
||||
from typing import List
|
||||
|
||||
|
||||
class ResnetConfig(PretrainedConfig):
    model_type = "resnet"

    def __init__(
        self,
        block_type="bottleneck",
        layers: List[int] = [3, 4, 6, 3],
        num_classes: int = 1000,
        input_channels: int = 3,
        cardinality: int = 1,
        base_width: int = 64,
        stem_width: int = 64,
        stem_type: str = "",
        avg_down: bool = False,
        **kwargs,
    ):
        if block_type not in ["basic", "bottleneck"]:
            raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
        if stem_type not in ["", "deep", "deep-tiered"]:
            raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")

        self.block_type = block_type
        self.layers = layers
        self.num_classes = num_classes
        self.input_channels = input_channels
        self.cardinality = cardinality
        self.base_width = base_width
        self.stem_width = stem_width
        self.stem_type = stem_type
        self.avg_down = avg_down
        super().__init__(**kwargs)
|
||||
```
|
||||
|
||||
The three important things to remember when writing your own configuration are the following:
|
||||
- you have to inherit from `PretrainedConfig`,
|
||||
- the `__init__` of your `PretrainedConfig` must accept any kwargs,
|
||||
- those `kwargs` need to be passed to the superclass `__init__`.
|
||||
|
||||
The inheritance is to make sure you get all the functionality from the 🤗 Transformers library, while the two other
|
||||
constraints come from the fact a `PretrainedConfig` has more fields than the ones you are setting. When reloading a
|
||||
config with the `from_pretrained` method, those fields need to be accepted by your config and then sent to the
|
||||
superclass.
|
||||
|
||||
Defining a `model_type` for your configuration (here `model_type="resnet"`) is not mandatory, unless you want to
|
||||
register your model with the auto classes (see last section).
|
||||
|
||||
With this done, you can easily create and save your configuration like you would do with any other model config of the
|
||||
library. Here is how we can create a resnet50d config and save it:
|
||||
|
||||
```py
|
||||
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
|
||||
resnet50d_config.save_pretrained("custom-resnet")
|
||||
```
|
||||
|
||||
This will save a file named `config.json` inside the folder `custom-resnet`. You can then reload your config with the
|
||||
`from_pretrained` method:
|
||||
|
||||
```py
|
||||
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
|
||||
```
|
||||
|
||||
You can also use any other method of the [`PretrainedConfig`] class, like [`~PretrainedConfig.push_to_hub`] to
|
||||
directly upload your config to the Hub.
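
For example, a minimal sketch of pushing the config to the Hub (the repository name is a placeholder and you need to be
logged in to the Hub):

```py
resnet50d_config.push_to_hub("custom-resnet")
```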
|
||||
|
||||
## Writing a custom model
|
||||
|
||||
Now that we have our ResNet configuration, we can go on writing the model. We will actually write two: one that
|
||||
extracts the hidden features from a batch of images (like [`BertModel`]) and one that is suitable for image
|
||||
classification (like [`BertForSequenceClassification`]).
|
||||
|
||||
As we mentioned before, we'll only write a loose wrapper of the model to keep it simple for this example. The only
|
||||
thing we need to do before writing this class is a map between the block types and actual block classes. Then the
|
||||
model is defined from the configuration by passing everything to the `ResNet` class:
|
||||
|
||||
```py
|
||||
from transformers import PreTrainedModel
|
||||
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
|
||||
from .configuration_resnet import ResnetConfig
|
||||
|
||||
|
||||
BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}
|
||||
|
||||
|
||||
class ResnetModel(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor):
        return self.model.forward_features(tensor)
|
||||
```
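
As a quick sanity check (a hedged sketch assuming `timm` and `torch` are installed, using a dummy batch of images):

```py
import torch

config = ResnetConfig()
model = ResnetModel(config)

dummy_images = torch.randn(1, 3, 224, 224)  # batch of one 3-channel 224x224 image
features = model(dummy_images)
print(features.shape)
```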
|
||||
|
||||
For the model that will classify images, we just change the forward method:
|
||||
|
||||
```py
import torch


class ResnetModelForImageClassification(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor, labels=None):
        logits = self.model(tensor)
        if labels is not None:
            # use the functional cross-entropy loss (torch.nn.functional.cross_entropy)
            loss = torch.nn.functional.cross_entropy(logits, labels)
            return {"loss": loss, "logits": logits}
        return {"logits": logits}
|
||||
```
|
||||
|
||||
In both cases, notice how we inherit from `PreTrainedModel` and call the superclass initialization with the `config`
|
||||
(a bit like when you write a regular `torch.nn.Module`). The line that sets the `config_class` is not mandatory, unless
|
||||
you want to register your model with the auto classes (see last section).
|
||||
|
||||
<Tip>
|
||||
|
||||
If your model is very similar to a model inside the library, you can re-use the same configuration as this model.
|
||||
|
||||
</Tip>
|
||||
|
||||
You can have your model return anything you want, but returning a dictionary like we did for
|
||||
`ResnetModelForImageClassification`, with the loss included when labels are passed, will make your model directly
|
||||
usable inside the [`Trainer`] class. Using another output format is fine as long as you are planning on using your own
|
||||
training loop or another library for training.
|
||||
|
||||
Now that we have our model class, let's create one:
|
||||
|
||||
```py
|
||||
resnet50d = ResnetModelForImageClassification(resnet50d_config)
|
||||
```
|
||||
|
||||
Again, you can use any of the methods of [`PreTrainedModel`], like [`~PreTrainedModel.save_pretrained`] or
|
||||
[`~PreTrainedModel.push_to_hub`]. We will use the second in the next section, and see how to push the model weights
|
||||
with the code of our model. But first, let's load some pretrained weights inside our model.
|
||||
|
||||
In your own use case, you will probably be training your custom model on your own data. To go fast for this tutorial,
|
||||
we will use the pretrained version of the resnet50d. Since our model is just a wrapper around it, it's going to be
|
||||
easy to transfer those weights:
|
||||
|
||||
```py
|
||||
import timm
|
||||
|
||||
pretrained_model = timm.create_model("resnet50d", pretrained=True)
|
||||
resnet50d.model.load_state_dict(pretrained_model.state_dict())
|
||||
```
|
||||
|
||||
Now let's see how to make sure that when we do [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`], the
|
||||
code of the model is saved.
|
||||
|
||||
## Sending the code to the Hub
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This API is experimental and may have some slight breaking changes in the next releases.
|
||||
|
||||
</Tip>
|
||||
|
||||
First, make sure your model is fully defined in a `.py` file. It can rely on relative imports to some other files as
|
||||
long as all the files are in the same directory (we don't support submodules for this feature yet). For our example,
|
||||
we'll define a `modeling_resnet.py` file and a `configuration_resnet.py` file in a folder of the current working
|
||||
directory named `resnet_model`. The configuration file contains the code for `ResnetConfig` and the modeling file
|
||||
contains the code of `ResnetModel` and `ResnetModelForImageClassification`.
|
||||
|
||||
```
|
||||
.
|
||||
└── resnet_model
    ├── __init__.py
    ├── configuration_resnet.py
    └── modeling_resnet.py
|
||||
```
|
||||
|
||||
The `__init__.py` can be empty, it's just there so that Python detects `resnet_model` can be used as a module.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
If copying modeling files from the library, you will need to replace all the relative imports at the top of the file
|
||||
to import from the `transformers` package.
|
||||
|
||||
</Tip>
|
||||
|
||||
Note that you can re-use (or subclass) an existing configuration/model.
|
||||
|
||||
To share your model with the community, follow these steps: first import the ResNet model and config from the newly
|
||||
created files:
|
||||
|
||||
```py
|
||||
from resnet_model.configuration_resnet import ResnetConfig
|
||||
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
|
||||
```
|
||||
|
||||
Then you have to tell the library you want to copy the code files of those objects when using the `save_pretrained`
method and properly register them with a given Auto class (especially for models). To do so, just run:
|
||||
|
||||
```py
|
||||
ResnetConfig.register_for_auto_class()
|
||||
ResnetModel.register_for_auto_class("AutoModel")
|
||||
ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
|
||||
```
|
||||
|
||||
Note that there is no need to specify an auto class for the configuration (there is only one auto class for them,
|
||||
[`AutoConfig`]) but it's different for models. Your custom model could be suitable for many different tasks, so you
|
||||
have to specify which one of the auto classes is the correct one for your model.
|
||||
|
||||
Next, let's create the config and models as we did before:
|
||||
|
||||
```py
|
||||
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
|
||||
resnet50d = ResnetModelForImageClassification(resnet50d_config)
|
||||
|
||||
pretrained_model = timm.create_model("resnet50d", pretrained=True)
|
||||
resnet50d.model.load_state_dict(pretrained_model.state_dict())
|
||||
```
|
||||
|
||||
Now to send the model to the Hub, make sure you are logged in. Either run in your terminal:
|
||||
|
||||
```bash
|
||||
huggingface-cli login
|
||||
```
|
||||
|
||||
or from a notebook:
|
||||
|
||||
```py
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
notebook_login()
|
||||
```
|
||||
|
||||
You can then push to your own namespace (or an organization you are a member of) like this:
|
||||
|
||||
```py
|
||||
resnet50d.push_to_hub("custom-resnet50d")
|
||||
```
|
||||
|
||||
On top of the modeling weights and the configuration in JSON format, this also copied the modeling and
|
||||
configuration `.py` files in the folder `custom-resnet50d` and uploaded the result to the Hub. You can check the result
|
||||
in this [model repo](https://huggingface.co/sgugger/custom-resnet50d).
|
||||
|
||||
See the [sharing tutorial](model_sharing) for more information on the push to Hub method.
|
||||
|
||||
## Using a model with custom code
|
||||
|
||||
You can use any configuration, model or tokenizer with custom code files in its repository with the auto-classes and
|
||||
the `from_pretrained` method. All files and code uploaded to the Hub are scanned for malware (refer to the [Hub security](https://huggingface.co/docs/hub/security#malware-scanning) documentation for more information), but you should still
|
||||
review the model code and author to avoid executing malicious code on your machine. Set `trust_remote_code=True` to use
|
||||
a model with custom code:
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForImageClassification
|
||||
|
||||
model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
|
||||
```
|
||||
|
||||
It is also strongly encouraged to pass a commit hash as a `revision` to make sure the author of the models did not
|
||||
update the code with some malicious new lines (unless you fully trust the authors of the models).
|
||||
|
||||
```py
|
||||
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
|
||||
model = AutoModelForImageClassification.from_pretrained(
|
||||
"sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
|
||||
)
|
||||
```
|
||||
|
||||
Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit
|
||||
hash of any commit.
|
||||
|
||||
## Registering a model with custom code to the auto classes
|
||||
|
||||
If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own
|
||||
model. This is different from pushing the code to the Hub in the sense that users will need to import your library to
|
||||
get the custom models (as opposed to automatically downloading the model code from the Hub).
|
||||
|
||||
As long as your config has a `model_type` attribute that is different from existing model types, and that your model
|
||||
classes have the right `config_class` attributes, you can just add them to the auto classes like this:
|
||||
|
||||
```py
|
||||
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification
|
||||
|
||||
AutoConfig.register("resnet", ResnetConfig)
|
||||
AutoModel.register(ResnetConfig, ResnetModel)
|
||||
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
|
||||
```
|
||||
|
||||
Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type`
|
||||
of your custom config, and the first argument used when registering your custom models to any auto model class needs
|
||||
to match the `config_class` of those models.
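
Once registered, the auto classes can build the custom model directly from the config. Here is a minimal sketch
reusing the `resnet50d_config` created above (illustration only):

```py
from transformers import AutoModelForImageClassification

# After the register() calls above, the auto classes resolve ResnetConfig to the custom class.
model = AutoModelForImageClassification.from_config(resnet50d_config)
```

`from_pretrained` then works the same way once weights have been saved with `save_pretrained`, because the saved
config carries `model_type="resnet"`.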
|
||||
docs/source/en/debugging.mdx (new file, 335 lines)
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Debugging
|
||||
|
||||
## Multi-GPU Network Issues Debug
|
||||
|
||||
When training or inferencing with `DistributedDataParallel` and multiple GPUs, if you run into issues of inter-communication between processes and/or nodes, you can use the following script to diagnose network problems.
|
||||
|
||||
```bash
|
||||
wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py
|
||||
```
|
||||
|
||||
For example, to test how 2 GPUs interact, run:
|
||||
|
||||
```bash
|
||||
python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
|
||||
```
|
||||
If both processes can talk to each other and allocate GPU memory, each will print an OK status.
|
||||
|
||||
For more GPUs or nodes adjust the arguments in the script.
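
For instance, here is a sketch of a 2-node run with 4 GPUs per node (the master address, port and node rank are
placeholders to adapt to your own setup):

```bash
# run this on the first node (rank 0); on the second node use --node_rank 1
python -m torch.distributed.run --nproc_per_node 4 --nnodes 2 --node_rank 0 \
    --master_addr 10.0.0.1 --master_port 29500 torch-distributed-gpu-test.py
```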
|
||||
|
||||
You will find a lot more details inside the diagnostics script, including a recipe for how to run it in a SLURM environment.
|
||||
|
||||
An additional level of debugging is to add the `NCCL_DEBUG=INFO` environment variable as follows:
|
||||
|
||||
```bash
|
||||
NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
|
||||
```
|
||||
|
||||
This will dump a lot of NCCL-related debug information, which you can then search online if you find that some problems are reported. Or if you're not sure how to interpret the output you can share the log file in an Issue.
|
||||
|
||||
|
||||
|
||||
## Underflow and Overflow Detection
|
||||
|
||||
<Tip>
|
||||
|
||||
This feature is currently only available for PyTorch.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
For multi-GPU training it requires DDP (`torch.distributed.launch`).
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
This feature can be used with any `nn.Module`-based model.
|
||||
|
||||
</Tip>
|
||||
|
||||
If you start getting `loss=NaN` or the model exhibits some other abnormal behavior due to `inf` or `nan` in
|
||||
activations or weights, one needs to discover where the first underflow or overflow happens and what led to it. Luckily
|
||||
you can accomplish that easily by activating a special module that will do the detection automatically.
|
||||
|
||||
If you're using [`Trainer`], you just need to add:
|
||||
|
||||
```bash
|
||||
--debug underflow_overflow
|
||||
```
|
||||
|
||||
to the normal command line arguments, or pass `debug="underflow_overflow"` when creating the
|
||||
[`TrainingArguments`] object.
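
For instance, a minimal sketch (the output directory name is just a placeholder):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="output",  # placeholder
    debug="underflow_overflow",
)
```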
|
||||
|
||||
If you're using your own training loop or another Trainer you can accomplish the same with:
|
||||
|
||||
```python
|
||||
from transformers.debug_utils import DebugUnderflowOverflow
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model)
|
||||
```
|
||||
|
||||
[`~debug_utils.DebugUnderflowOverflow`] inserts hooks into the model that immediately after each
|
||||
forward call will test input and output variables and also the corresponding module's weights. As soon as `inf` or
|
||||
`nan` is detected in at least one element of the activations or weights, the program will assert and print a report
|
||||
like this (this was caught with `google/mt5-small` under fp16 mixed precision):
|
||||
|
||||
```
|
||||
Detected inf/nan during batch_number=0
|
||||
Last 21 forward frames:
|
||||
abs min abs max metadata
|
||||
encoder.block.1.layer.1.DenseReluDense.dropout Dropout
|
||||
0.00e+00 2.57e+02 input[0]
|
||||
0.00e+00 2.85e+02 output
|
||||
[...]
|
||||
encoder.block.2.layer.0 T5LayerSelfAttention
|
||||
6.78e-04 3.15e+03 input[0]
|
||||
2.65e-04 3.42e+03 output[0]
|
||||
None output[1]
|
||||
2.25e-01 1.00e+04 output[2]
|
||||
encoder.block.2.layer.1.layer_norm T5LayerNorm
|
||||
8.69e-02 4.18e-01 weight
|
||||
2.65e-04 3.42e+03 input[0]
|
||||
1.79e-06 4.65e+00 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
|
||||
2.17e-07 4.50e+00 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
2.68e-06 3.70e+01 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
|
||||
8.08e-07 2.66e+01 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
1.27e-04 2.37e+02 output
|
||||
encoder.block.2.layer.1.DenseReluDense.dropout Dropout
|
||||
0.00e+00 8.76e+03 input[0]
|
||||
0.00e+00 9.74e+03 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wo Linear
|
||||
1.01e-06 6.44e+00 weight
|
||||
0.00e+00 9.74e+03 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.dropout Dropout
|
||||
3.18e-04 6.27e+04 input[0]
|
||||
0.00e+00 inf output
|
||||
```
|
||||
|
||||
The example output has been trimmed in the middle for brevity.
|
||||
|
||||
The second column shows the value of the absolute largest element, so if you have a closer look at the last few frames,
|
||||
the inputs and outputs were in the range of `1e4`. So when this training was done under fp16 mixed precision the very
|
||||
last step overflowed (since under `fp16` the largest representable number before `inf` is `65504`, roughly `64e3`). To avoid overflows under
|
||||
`fp16` the activations must remain way below `1e4`, because `1e4 * 1e4 = 1e8` so any matrix multiplication with
|
||||
large activations is going to lead to a numerical overflow condition.
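
You can verify the limit directly with a quick sketch:

```python
import torch

print(torch.finfo(torch.float16).max)  # 65504.0, i.e. roughly 64e3
x = torch.tensor([1e4], dtype=torch.float16)
print(x * x)  # tensor([inf], dtype=torch.float16), since 1e8 does not fit in fp16
```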
|
||||
|
||||
At the very start of the trace you can discover at which batch number the problem occurred (here `Detected inf/nan during batch_number=0` means the problem occurred on the first batch).
|
||||
|
||||
Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting
|
||||
for. If we look just at this frame:
|
||||
|
||||
```
|
||||
encoder.block.2.layer.1.layer_norm T5LayerNorm
|
||||
8.69e-02 4.18e-01 weight
|
||||
2.65e-04 3.42e+03 input[0]
|
||||
1.79e-06 4.65e+00 output
|
||||
```
|
||||
|
||||
Here, `encoder.block.2.layer.1.layer_norm` indicates that it was a layer norm for the first layer of the second
block of the encoder, and that this particular `forward` call is `T5LayerNorm`.
|
||||
|
||||
Let's look at the last few frames of that report:
|
||||
|
||||
```
|
||||
Detected inf/nan during batch_number=0
|
||||
Last 21 forward frames:
|
||||
abs min abs max metadata
|
||||
[...]
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
|
||||
2.17e-07 4.50e+00 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
2.68e-06 3.70e+01 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
|
||||
8.08e-07 2.66e+01 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
1.27e-04 2.37e+02 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wo Linear
|
||||
1.01e-06 6.44e+00 weight
|
||||
0.00e+00 9.74e+03 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.dropout Dropout
|
||||
3.18e-04 6.27e+04 input[0]
|
||||
0.00e+00 inf output
|
||||
```
|
||||
|
||||
The last frame reports for the `Dropout.forward` function, with the first entry for the only input and the second for
the only output. You can see that it was called from the `dropout` attribute inside the `DenseReluDense` class. We can
see that it happened during the first layer of the 2nd block, during the very first batch. Finally, the absolute
largest input element was `6.27e+04` and the absolute largest output was `inf`.
|
||||
|
||||
You can see here that `T5DenseGatedGeluDense.forward` resulted in output activations whose absolute max value was
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout`, which renormalizes
the outputs after zeroing some of the elements, and that pushes the absolute max value above 64K, giving us an
overflow (`inf`).
|
||||
|
||||
As you can see, it's the previous frames that we need to look into when the numbers start becoming very large for fp16.
|
||||
|
||||
Let's match the report to the code from `models/t5/modeling_t5.py`:
|
||||
|
||||
```python
|
||||
class T5DenseGatedGeluDense(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
|
||||
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
|
||||
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
|
||||
self.dropout = nn.Dropout(config.dropout_rate)
|
||||
self.gelu_act = ACT2FN["gelu_new"]
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
|
||||
hidden_linear = self.wi_1(hidden_states)
|
||||
hidden_states = hidden_gelu * hidden_linear
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.wo(hidden_states)
|
||||
return hidden_states
|
||||
```
|
||||
|
||||
Now it's easy to see the `dropout` call, and all the previous calls as well.
|
||||
|
||||
Since the detection is happening in a forward hook, these reports are printed immediately after each `forward`
|
||||
returns.
|
||||
|
||||
Going back to the full report, to act on it and to fix the problem, we need to go a few frames up where the numbers
|
||||
started to go up and most likely switch to the `fp32` mode here, so that the numbers don't overflow when multiplied
|
||||
or summed up. Of course, there might be other solutions. For example, we could turn off `amp` temporarily if it's
|
||||
enabled, after moving the original `forward` into a helper wrapper, like so:
|
||||
|
||||
```python
|
||||
def _forward(self, hidden_states):
|
||||
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
|
||||
hidden_linear = self.wi_1(hidden_states)
|
||||
hidden_states = hidden_gelu * hidden_linear
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.wo(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
def forward(self, hidden_states):
|
||||
if torch.is_autocast_enabled():
|
||||
with torch.cuda.amp.autocast(enabled=False):
|
||||
return self._forward(hidden_states)
|
||||
else:
|
||||
return self._forward(hidden_states)
|
||||
```
|
||||
|
||||
Since the automatic detector only reports on inputs and outputs of full frames, once you know where to look, you may
|
||||
want to analyse the intermediary stages of any specific `forward` function as well. In such a case you can use the
|
||||
`detect_overflow` helper function to inject the detector where you want it, for example:
|
||||
|
||||
```python
|
||||
from transformers.debug_utils import detect_overflow
|
||||
|
||||
|
||||
class T5LayerFF(nn.Module):
|
||||
[...]
|
||||
|
||||
def forward(self, hidden_states):
|
||||
forwarded_states = self.layer_norm(hidden_states)
|
||||
detect_overflow(forwarded_states, "after layer_norm")
|
||||
forwarded_states = self.DenseReluDense(forwarded_states)
|
||||
detect_overflow(forwarded_states, "after DenseReluDense")
|
||||
return hidden_states + self.dropout(forwarded_states)
|
||||
```
|
||||
|
||||
You can see that we added 2 of these, and we now track whether `inf` or `nan` was detected for `forwarded_states`
somewhere in between.
|
||||
|
||||
Actually, the detector already reports these because each of the calls in the example above is an `nn.Module`, but if
you had some local direct calculations, this is how you would track them.
|
||||
|
||||
Additionally, if you're instantiating the debugger in your own code, you can adjust the number of frames printed from
|
||||
its default, e.g.:
|
||||
|
||||
```python
|
||||
from transformers.debug_utils import DebugUnderflowOverflow
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
|
||||
```
|
||||
|
||||
### Specific batch absolute min and max value tracing
|
||||
|
||||
The same debugging class can be used for per-batch tracing with the underflow/overflow detection feature turned off.
|
||||
|
||||
Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a given
|
||||
batch, and only do that for batches 1 and 3. Then you instantiate this class as:
|
||||
|
||||
```python
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
|
||||
```
|
||||
|
||||
And now full batches 1 and 3 will be traced using the same format as the underflow/overflow detector does.
|
||||
|
||||
Batches are 0-indexed.
|
||||
|
||||
This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward
|
||||
right to that area. Here is a sample truncated output for such configuration:
|
||||
|
||||
```
|
||||
*** Starting batch number=1 ***
|
||||
abs min abs max metadata
|
||||
shared Embedding
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 2.47e+04 input[0]
|
||||
5.36e-05 7.92e+02 output
|
||||
[...]
|
||||
decoder.dropout Dropout
|
||||
1.60e-07 2.27e+01 input[0]
|
||||
0.00e+00 2.52e+01 output
|
||||
decoder T5Stack
|
||||
not a tensor output
|
||||
lm_head Linear
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 1.11e+00 input[0]
|
||||
6.06e-02 8.39e+01 output
|
||||
T5ForConditionalGeneration
|
||||
not a tensor output
|
||||
|
||||
*** Starting batch number=3 ***
|
||||
abs min abs max metadata
|
||||
shared Embedding
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 2.78e+04 input[0]
|
||||
5.36e-05 7.92e+02 output
|
||||
[...]
|
||||
```
|
||||
|
||||
Here you will get a huge number of frames dumped - as many as there were forward calls in your model - so it may or may
not be what you want, but sometimes it can be easier to use for debugging than a normal debugger. For example, if a
problem starts happening at batch number 150, you can dump traces for batches 149 and 150 and compare where the
numbers started to diverge.
|
||||
|
||||
You can also specify the batch number after which to stop the training, with:
|
||||
|
||||
```python
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
|
||||
```
|
||||
docs/source/en/fast_tokenizers.mdx (new file, 70 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Use tokenizers from 🤗 Tokenizers
|
||||
|
||||
The [`PreTrainedTokenizerFast`] depends on the [🤗 Tokenizers](https://huggingface.co/docs/tokenizers) library. The tokenizers obtained from the 🤗 Tokenizers library can be
|
||||
loaded very simply into 🤗 Transformers.
|
||||
|
||||
Before getting in the specifics, let's first start by creating a dummy tokenizer in a few lines:
|
||||
|
||||
```python
|
||||
>>> from tokenizers import Tokenizer
|
||||
>>> from tokenizers.models import BPE
|
||||
>>> from tokenizers.trainers import BpeTrainer
|
||||
>>> from tokenizers.pre_tokenizers import Whitespace
|
||||
|
||||
>>> tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
|
||||
>>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
|
||||
|
||||
>>> tokenizer.pre_tokenizer = Whitespace()
|
||||
>>> files = [...]
|
||||
>>> tokenizer.train(files, trainer)
|
||||
```
|
||||
|
||||
We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to
|
||||
a JSON file for future re-use.
|
||||
|
||||
## Loading directly from the tokenizer object
|
||||
|
||||
Let's see how to leverage this tokenizer object in the 🤗 Transformers library. The
|
||||
[`PreTrainedTokenizerFast`] class allows for easy instantiation, by accepting the instantiated
|
||||
*tokenizer* object as an argument:
|
||||
|
||||
```python
|
||||
>>> from transformers import PreTrainedTokenizerFast
|
||||
|
||||
>>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
|
||||
```
|
||||
|
||||
This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to [the tokenizer
|
||||
page](main_classes/tokenizer) for more information.
|
||||
|
||||
## Loading from a JSON file
|
||||
|
||||
In order to load a tokenizer from a JSON file, let's first start by saving our tokenizer:
|
||||
|
||||
```python
|
||||
>>> tokenizer.save("tokenizer.json")
|
||||
```
|
||||
|
||||
The path to which we saved this file can be passed to the [`PreTrainedTokenizerFast`] initialization
|
||||
method using the `tokenizer_file` parameter:
|
||||
|
||||
```python
|
||||
>>> from transformers import PreTrainedTokenizerFast
|
||||
|
||||
>>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
|
||||
```
|
||||
|
||||
This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to [the tokenizer
|
||||
page](main_classes/tokenizer) for more information.
|
||||
docs/source/en/glossary.mdx (new file, 300 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Glossary
|
||||
|
||||
## General terms
|
||||
|
||||
- autoencoding models: see MLM
|
||||
- autoregressive models: see CLM
|
||||
- CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the
|
||||
next word. It's usually done by reading the whole sentence but using a mask inside the model to hide the future
|
||||
tokens at a certain timestep.
|
||||
- deep learning: machine learning algorithms which use neural networks with several layers.
|
||||
- MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done
|
||||
by masking some tokens randomly, and has to predict the original text.
|
||||
- multimodal: a task that combines texts with another kind of input (for instance images).
|
||||
- NLG: natural language generation, all tasks related to generating text (for instance talk with transformers,
|
||||
translation).
|
||||
- NLP: natural language processing, a generic way to say "deal with texts".
|
||||
- NLU: natural language understanding, all tasks related to understanding what is in a text (for instance classifying
|
||||
the whole text, individual words).
|
||||
- pretrained model: a model that has been pretrained on some data (for instance all of Wikipedia). Pretraining methods
|
||||
involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or
|
||||
masking some words and trying to predict them (see MLM).
|
||||
- RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts.
|
||||
- self-attention: each element of the input finds out which other elements of the input it should attend to.
|
||||
- seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or
|
||||
summarization models (such as [Bart](model_doc/bart) or [T5](model_doc/t5)).
|
||||
- token: a part of a sentence, usually a word, but can also be a subword (non-common words are often split into subwords)
|
||||
or a punctuation symbol.
|
||||
- transformer: self-attention based deep learning model architecture.
|
||||
|
||||
## Model inputs
|
||||
|
||||
Every model is different yet bears similarities with the others. Therefore most models use the same inputs, which are
|
||||
detailed here alongside usage examples.
|
||||
|
||||
<a id='input-ids'></a>
|
||||
|
||||
### Input IDs
|
||||
|
||||
The input ids are often the only required parameters to be passed to the model as input. *They are token indices,
|
||||
numerical representations of tokens building the sequences that will be used as input by the model*.
|
||||
|
||||
<Youtube id="VFp38yj8h3A"/>
|
||||
|
||||
Each tokenizer works differently but the underlying mechanism remains the same. Here's an example using the BERT
|
||||
tokenizer, which is a [WordPiece](https://arxiv.org/pdf/1609.08144.pdf) tokenizer:
|
||||
|
||||
```python
|
||||
>>> from transformers import BertTokenizer
|
||||
|
||||
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
|
||||
|
||||
>>> sequence = "A Titan RTX has 24GB of VRAM"
|
||||
```
|
||||
|
||||
The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary.
|
||||
|
||||
```python
|
||||
>>> tokenized_sequence = tokenizer.tokenize(sequence)
|
||||
```
|
||||
|
||||
The tokens are either words or subwords. Here for instance, "VRAM" wasn't in the model vocabulary, so it's been split
|
||||
in "V", "RA" and "M". To indicate those tokens are not separate words but parts of the same word, a double-hash prefix
|
||||
is added for "RA" and "M":
|
||||
|
||||
```python
|
||||
>>> print(tokenized_sequence)
|
||||
['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M']
|
||||
```
|
||||
|
||||
These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding
|
||||
the sentence to the tokenizer, which leverages the Rust implementation of [🤗 Tokenizers](https://github.com/huggingface/tokenizers) for peak performance.
|
||||
|
||||
```python
|
||||
>>> inputs = tokenizer(sequence)
|
||||
```
|
||||
|
||||
The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. The
|
||||
token indices are under the key "input_ids":
|
||||
|
||||
```python
|
||||
>>> encoded_sequence = inputs["input_ids"]
|
||||
>>> print(encoded_sequence)
|
||||
[101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102]
|
||||
```
|
||||
|
||||
Note that the tokenizer automatically adds "special tokens" (if the associated model relies on them) which are special
|
||||
IDs the model sometimes uses.
|
||||
|
||||
If we decode the previous sequence of ids,
|
||||
|
||||
```python
|
||||
>>> decoded_sequence = tokenizer.decode(encoded_sequence)
|
||||
```
|
||||
|
||||
we will see
|
||||
|
||||
```python
|
||||
>>> print(decoded_sequence)
|
||||
[CLS] A Titan RTX has 24GB of VRAM [SEP]
|
||||
```
|
||||
|
||||
because this is the way a [`BertModel`] is going to expect its inputs.
|
||||
|
||||
<a id='attention-mask'></a>
|
||||
|
||||
### Attention mask
|
||||
|
||||
The attention mask is an optional argument used when batching sequences together.
|
||||
|
||||
<Youtube id="M6adb1j2jPI"/>
|
||||
|
||||
This argument indicates to the model which tokens should be attended to, and which should not.
|
||||
|
||||
For example, consider these two sequences:
|
||||
|
||||
```python
|
||||
>>> from transformers import BertTokenizer
|
||||
|
||||
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
|
||||
|
||||
>>> sequence_a = "This is a short sequence."
|
||||
>>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A."
|
||||
|
||||
>>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"]
|
||||
>>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"]
|
||||
```
|
||||
|
||||
The encoded versions have different lengths:
|
||||
|
||||
```python
|
||||
>>> len(encoded_sequence_a), len(encoded_sequence_b)
|
||||
(8, 19)
|
||||
```
|
||||
|
||||
Therefore, we can't put them together in the same tensor as-is. The first sequence needs to be padded up to the length
|
||||
of the second one, or the second one needs to be truncated down to the length of the first one.
|
||||
|
||||
In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask
|
||||
it to pad like this:
|
||||
|
||||
```python
|
||||
>>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True)
|
||||
```
|
||||
|
||||
We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:
|
||||
|
||||
```python
|
||||
>>> padded_sequences["input_ids"]
|
||||
[[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]]
|
||||
```
|
||||
|
||||
This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating the
|
||||
position of the padded indices so that the model does not attend to them. For the [`BertTokenizer`],
|
||||
`1` indicates a value that should be attended to, while `0` indicates a padded value. This attention mask is
|
||||
in the dictionary returned by the tokenizer under the key "attention_mask":
|
||||
|
||||
```python
|
||||
>>> padded_sequences["attention_mask"]
|
||||
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
|
||||
```
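
For example, the tokenizer can pad and return framework tensors in one call; here is a minimal sketch reusing the two
sequences above with PyTorch:

```python
>>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True, return_tensors="pt")
>>> padded_sequences["input_ids"].shape
torch.Size([2, 19])
>>> padded_sequences["attention_mask"].shape
torch.Size([2, 19])
```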
|
||||
|
||||
<a id='token-type-ids'></a>
|
||||
|
||||
### Token Type IDs
|
||||
|
||||
Some models' purpose is to do classification on pairs of sentences or question answering.
|
||||
|
||||
<Youtube id="0u3ioSwev3s"/>
|
||||
|
||||
These require two different sequences to be joined in a single "input_ids" entry, which usually is performed with the
|
||||
help of special tokens, such as the classifier (`[CLS]`) and separator (`[SEP]`) tokens. For example, the BERT
|
||||
model builds its two sequence input as such:
|
||||
|
||||
```python
|
||||
>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]
|
||||
```
|
||||
|
||||
We can use our tokenizer to automatically generate such a sentence by passing the two sequences to `tokenizer` as two
|
||||
arguments (and not a list, like before) like this:
|
||||
|
||||
```python
|
||||
>>> from transformers import BertTokenizer
|
||||
|
||||
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
|
||||
>>> sequence_a = "HuggingFace is based in NYC"
|
||||
>>> sequence_b = "Where is HuggingFace based?"
|
||||
|
||||
>>> encoded_dict = tokenizer(sequence_a, sequence_b)
|
||||
>>> decoded = tokenizer.decode(encoded_dict["input_ids"])
|
||||
```
|
||||
|
||||
which will return:
|
||||
|
||||
```python
|
||||
>>> print(decoded)
|
||||
[CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]
|
||||
```
|
||||
|
||||
This is enough for some models to understand where one sequence ends and where another begins. However, other models,
|
||||
such as BERT, also deploy token type IDs (also called segment IDs). They are represented as a binary mask identifying
|
||||
the two types of sequence in the model.
|
||||
|
||||
The tokenizer returns this mask as the "token_type_ids" entry:
|
||||
|
||||
```python
|
||||
>>> encoded_dict["token_type_ids"]
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
|
||||
```
|
||||
|
||||
The first sequence, the "context" used for the question, has all its tokens represented by a `0`, whereas the
|
||||
second sequence, corresponding to the "question", has all its tokens represented by a `1`.
|
||||
|
||||
Some models, like [`XLNetModel`], use an additional token represented by a `2`.
|
||||
|
||||
<a id='position-ids'></a>
|
||||
|
||||
### Position IDs
|
||||
|
||||
Contrary to RNNs that have the position of each token embedded within them, transformers are unaware of the position of
|
||||
each token. Therefore, the position IDs (`position_ids`) are used by the model to identify each token's position in
|
||||
the list of tokens.
|
||||
|
||||
They are an optional parameter. If no `position_ids` are passed to the model, the IDs are automatically created as
|
||||
absolute positional embeddings.
|
||||
|
||||
Absolute positional embeddings are selected in the range `[0, config.max_position_embeddings - 1]`. Some models use
|
||||
other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.
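
As an illustration only, explicitly passing absolute position IDs to a BERT model could look like this:

```python
>>> import torch
>>> from transformers import BertModel, BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> model = BertModel.from_pretrained("bert-base-cased")

>>> inputs = tokenizer("A Titan RTX has 24GB of VRAM", return_tensors="pt")
>>> position_ids = torch.arange(inputs["input_ids"].shape[1]).unsqueeze(0)
>>> outputs = model(**inputs, position_ids=position_ids)
```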
|
||||
|
||||
<a id='labels'></a>
|
||||
|
||||
### Labels
|
||||
|
||||
The labels are an optional argument which can be passed in order for the model to compute the loss itself. These labels
|
||||
should be the expected prediction of the model: it will use the standard loss in order to compute the loss between its
|
||||
predictions and the expected value (the label).
|
||||
|
||||
These labels are different according to the model head, for example:
|
||||
|
||||
- For sequence classification models (e.g., [`BertForSequenceClassification`]), the model expects a
|
||||
tensor of dimension `(batch_size)` with each value of the batch corresponding to the expected label of the
|
||||
entire sequence.
|
||||
- For token classification models (e.g., [`BertForTokenClassification`]), the model expects a tensor
|
||||
of dimension `(batch_size, seq_length)` with each value corresponding to the expected label of each individual
|
||||
token.
|
||||
- For masked language modeling (e.g., [`BertForMaskedLM`]), the model expects a tensor of dimension
|
||||
`(batch_size, seq_length)` with each value corresponding to the expected label of each individual token: the
|
||||
labels being the token ID for the masked token, and values to be ignored for the rest (usually -100).
|
||||
- For sequence to sequence tasks (e.g., [`BartForConditionalGeneration`],
|
||||
[`MBartForConditionalGeneration`]), the model expects a tensor of dimension `(batch_size, tgt_seq_length)` with each value corresponding to the target sequences associated with each input sequence. During
|
||||
training, both *BART* and *T5* will make the appropriate *decoder_input_ids* and decoder attention masks internally.
|
||||
They usually do not need to be supplied. This does not apply to models leveraging the Encoder-Decoder framework. See
|
||||
the documentation of each model for more information on each specific model's labels.
|
||||
|
||||
The base models (e.g., [`BertModel`]) do not accept labels, as these are the base transformer
|
||||
models, simply outputting features.
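
For models that do have a head, passing `labels` makes the forward pass return a loss. Here is a minimal sketch with a
sequence classification head (the checkpoint and label value are only for illustration):

```python
>>> import torch
>>> from transformers import BertForSequenceClassification, BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> model = BertForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

>>> inputs = tokenizer("This is a short sequence.", return_tensors="pt")
>>> labels = torch.tensor([1])  # one label per sequence in the batch
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
```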
|
||||
|
||||
<a id='decoder-input-ids'></a>
|
||||
|
||||
### Decoder input IDs
|
||||
|
||||
This input is specific to encoder-decoder models, and contains the input IDs that will be fed to the decoder. These
|
||||
inputs should be used for sequence to sequence tasks, such as translation or summarization, and are usually built in a
|
||||
way specific to each model.
|
||||
|
||||
Most encoder-decoder models (BART, T5) create their `decoder_input_ids` on their own from the `labels`. In
|
||||
such models, passing the `labels` is the preferred way to handle training.
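
For example, with T5 it is enough to pass `labels`; the `decoder_input_ids` are created internally by shifting the
labels to the right. A minimal sketch:

```python
>>> from transformers import T5ForConditionalGeneration, T5Tokenizer

>>> tokenizer = T5Tokenizer.from_pretrained("t5-small")
>>> model = T5ForConditionalGeneration.from_pretrained("t5-small")

>>> inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
>>> labels = tokenizer("Das Haus ist wunderbar.", return_tensors="pt").input_ids
>>> outputs = model(**inputs, labels=labels)  # decoder_input_ids are built from labels internally
>>> loss = outputs.loss
```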
|
||||
|
||||
Please check each model's docs to see how they handle these input IDs for sequence to sequence training.
|
||||
|
||||
<a id='feed-forward-chunking'></a>
|
||||
|
||||
### Feed Forward Chunking
|
||||
|
||||
In each residual attention block in transformers the self-attention layer is usually followed by 2 feed forward layers.
|
||||
The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g.,
3072 versus a hidden size of 768 for `bert-base-uncased`).
|
||||
|
||||
For an input of size `[batch_size, sequence_length]`, the memory required to store the intermediate feed forward
|
||||
embeddings `[batch_size, sequence_length, config.intermediate_size]` can account for a large fraction of the memory
|
||||
use. The authors of [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) noticed that since the
|
||||
computation is independent of the `sequence_length` dimension, it is mathematically equivalent to compute the output
|
||||
embeddings of both feed forward layers `[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n`
|
||||
individually and concat them afterward to `[batch_size, sequence_length, config.hidden_size]` with `n = sequence_length`, which trades increased computation time against reduced memory use, but yields a mathematically
|
||||
**equivalent** result.
|
||||
|
||||
For models employing the function [`apply_chunking_to_forward`], the `chunk_size` defines the
|
||||
number of output embeddings that are computed in parallel and thus defines the trade-off between memory and time
|
||||
complexity. If `chunk_size` is set to 0, no feed forward chunking is done.
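
Here is a sketch of how a module can use this function (the import path has moved between `transformers.modeling_utils`
and `transformers.pytorch_utils` across versions, so adjust it to your installed version):

```python
import torch
from transformers.modeling_utils import apply_chunking_to_forward  # or transformers.pytorch_utils


class ChunkedFeedForward(torch.nn.Module):
    def __init__(self, hidden_size=768, intermediate_size=3072, chunk_size=64):
        super().__init__()
        self.wi = torch.nn.Linear(hidden_size, intermediate_size)
        self.wo = torch.nn.Linear(intermediate_size, hidden_size)
        self.chunk_size = chunk_size  # 0 disables chunking
        self.seq_len_dim = 1  # chunk over the sequence_length dimension

    def ff_chunk(self, hidden_states):
        return self.wo(torch.nn.functional.relu(self.wi(hidden_states)))

    def forward(self, hidden_states):
        # Applies ff_chunk to chunk_size-sized slices along the sequence dimension and concatenates
        # the results, trading compute time for lower peak memory use. The sequence length must be
        # divisible by chunk_size.
        return apply_chunking_to_forward(self.ff_chunk, self.chunk_size, self.seq_len_dim, hidden_states)
```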
|
||||
@@ -12,25 +12,18 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# 🤗 Transformers
|
||||
|
||||
State-of-the-art Machine Learning for Jax, Pytorch and TensorFlow
|
||||
State-of-the-art Machine Learning for PyTorch, TensorFlow and JAX.
|
||||
|
||||
🤗 Transformers (formerly known as _pytorch-transformers_ and _pytorch-pretrained-bert_) provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.
|
||||
🤗 Transformers provides APIs to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you time from training a model from scratch. The models can be used across different modalities such as:
|
||||
|
||||
These models can be applied on:
|
||||
* 📝 Text: text classification, information extraction, question answering, summarization, translation, and text generation in over 100 languages.
|
||||
* 🖼️ Images: image classification, object detection, and segmentation.
|
||||
* 🗣️ Audio: speech recognition and audio classification.
|
||||
* 🐙 Multimodal: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.
|
||||
|
||||
* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, text generation, in over 100 languages.
|
||||
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
|
||||
* 🗣️ Audio, for tasks like speech recognition and audio classification.
|
||||
Our library supports seamless integration between three of the most popular deep learning libraries: [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) and [JAX](https://jax.readthedocs.io/en/latest/). Train your model in three lines of code in one framework, and load it for inference with another.
|
||||
|
||||
Transformer models can also perform tasks on **several modalities combined**, such as table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.
|
||||
|
||||
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments.
|
||||
|
||||
🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.
|
||||
|
||||
This is the documentation of our repository [transformers](https://github.com/huggingface/transformers). You can
|
||||
also follow our [online course](https://huggingface.co/course) that teaches how to use this library, as well as the
|
||||
other libraries developed by Hugging Face and the Hub.
|
||||
Each 🤗 Transformers architecture is defined in a standalone Python module so they can be easily customized for research and experiments.
|
||||
|
||||
## If you are looking for custom support from the Hugging Face team
|
||||
|
||||
@@ -38,91 +31,65 @@ other libraries developed by Hugging Face and the Hub.
|
||||
<img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/front/thumbnails/support.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
|
||||
</a><br>
|
||||
|
||||
## Features
|
||||
|
||||
1. Easy-to-use state-of-the-art models:
|
||||
- High performance on natural language understanding & generation, computer vision, and audio tasks.
|
||||
- Low barrier to entry for educators and practitioners.
|
||||
- Few user-facing abstractions with just three classes to learn.
|
||||
- A unified API for using all our pretrained models.
|
||||
|
||||
1. Lower compute costs, smaller carbon footprint:
|
||||
- Researchers can share trained models instead of always retraining.
|
||||
- Practitioners can reduce compute time and production costs.
|
||||
- Dozens of architectures with over 20,000 pretrained models, some in more than 100 languages.
|
||||
|
||||
1. Choose the right framework for every part of a model's lifetime:
|
||||
- Train state-of-the-art models in 3 lines of code.
|
||||
- Move a single model between TF2.0/PyTorch/JAX frameworks at will.
|
||||
- Seamlessly pick the right framework for training, evaluation and production.
|
||||
|
||||
1. Easily customize a model or an example to your needs:
|
||||
- We provide examples for each architecture to reproduce the results published by its original authors.
|
||||
- Model internals are exposed as consistently as possible.
|
||||
- Model files can be used independently of the library for quick experiments.
|
||||
|
||||
[All the model checkpoints](https://huggingface.co/models) are seamlessly integrated from the huggingface.co [model
|
||||
hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and
|
||||
[organizations](https://huggingface.co/organizations).
|
||||
|
||||
Current number of checkpoints: <img src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen">
|
||||
|
||||
## Contents
|
||||
|
||||
The documentation is organized in five parts:
|
||||
|
||||
- **GET STARTED** contains a quick tour, the installation instructions and some useful information about our philosophy
|
||||
and a glossary.
|
||||
- **USING 🤗 TRANSFORMERS** contains general tutorials on how to use the library.
|
||||
- **ADVANCED GUIDES** contains more advanced guides that are more specific to a given script or part of the library.
|
||||
- **RESEARCH** focuses on tutorials that have less to do with how to use the library but more about general research in
|
||||
transformers model
|
||||
- **API** contains the documentation of each public class and function, grouped in:
|
||||
- **GET STARTED** contains a quick tour and installation instructions to get up and running with 🤗 Transformers.
|
||||
- **TUTORIALS** are a great place to begin if you are new to our library. This section will help you gain the basic skills you need to start using 🤗 Transformers.
|
||||
- **HOW-TO GUIDES** will show you how to achieve a specific goal like fine-tuning a pretrained model for language modeling or how to create a custom model head.
|
||||
- **CONCEPTUAL GUIDES** provides more discussion and explanation of the underlying concepts and ideas behind models, tasks, and the design philosophy of 🤗 Transformers.
|
||||
- **API** describes each class and function, grouped in:
|
||||
|
||||
- **MAIN CLASSES** for the main classes exposing the important APIs of the library.
|
||||
- **MODELS** for the classes and functions related to each model implemented in the library.
|
||||
- **INTERNAL HELPERS** for the classes and functions we use internally.
|
||||
|
||||
The library currently contains Jax, PyTorch and Tensorflow implementations, pretrained model weights, usage scripts and
|
||||
conversion utilities for the following models.
|
||||
The library currently contains JAX, PyTorch and TensorFlow implementations, pretrained model weights, usage scripts and conversion utilities for the following models.
|
||||
|
||||
### Supported models
|
||||
|
||||
<!--This list is updated automatically from the README with _make fix-copies_. Do not update manually! -->
|
||||
|
||||
1. **[ALBERT](model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
|
||||
1. **[BART](model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
||||
1. **[BART](model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
||||
1. **[BARThez](model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
|
||||
1. **[BARTpho](model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||
1. **[BEiT](model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei.
|
||||
1. **[BERT](model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
|
||||
1. **[BERTweet](model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
||||
1. **[BERT For Sequence Generation](model_doc/bertgeneration)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BigBird-RoBERTa](model_doc/bigbird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BERT For Sequence Generation](model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BigBird-RoBERTa](model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-Pegasus](model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](model_doc/blenderbot_small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BORT](model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[ByT5](model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
|
||||
1. **[CamemBERT](model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CANINE](model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[ConvNeXT](model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CLIP](model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[ConvBERT](model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[CPM](model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
1. **[CTRL](model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
1. **[Data2Vec](model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
|
||||
1. **[DeBERTa](model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](model_doc/deberta_v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[Decision Transformer](model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
||||
1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||
1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||
1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
|
||||
1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[DPT](model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||
1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FNet](model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
1. **[Funnel Transformer](model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
1. **[GLPN](model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||
1. **[GPT](model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT-2](model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT-J](model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||
1. **[GPT Neo](model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
@ -139,21 +106,27 @@ conversion utilities for the following models.
|
||||
1. **[LXMERT](model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
|
||||
1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
|
||||
1. **[MBart](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[MBart-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
1. **[Megatron-BERT](model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[Megatron-GPT2](model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[MPNet](model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
|
||||
1. **[MT5](model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
||||
1. **[Nyströmformer](model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
|
||||
1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
|
||||
1. **[Perceiver IO](model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
|
||||
1. **[PhoBERT](model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
|
||||
1. **[PLBart](model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
|
||||
1. **[PoolFormer](model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
|
||||
1. **[ProphetNet](model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[QDQBert](model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
|
||||
1. **[REALM](model_doc/realm)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
|
||||
1. **[Reformer](model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
1. **[RemBERT](model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
|
||||
1. **[ResNet](model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
|
||||
1. **[RoBERTa](model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
|
||||
1. **[RoFormer](model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
|
||||
1. **[SegFormer](model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
|
||||
1. **[SEW](model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
|
||||
1. **[SEW-D](model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
|
||||
@ -161,21 +134,31 @@ conversion utilities for the following models.
|
||||
1. **[SpeechToTextTransformer2](model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
|
||||
1. **[Splinter](model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
|
||||
1. **[SqueezeBert](model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
|
||||
1. **[Swin Transformer](model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
|
||||
1. **[T5](model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[T5v1.1](model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
1. **[ViLT](model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||
1. **[Vision Transformer (ViT)](model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[ViTMAE](model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[VisualBERT](model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[WavLM](model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Wav2Vec2](model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[XGLM](model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
1. **[XLM-ProphetNet](model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
1. **[XLM-RoBERTa-XL](model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
|
||||
1. **[XLNet](model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||
1. **[XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[XLS-R](model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
|
||||
1. **[YOSO](model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
|
||||
|
||||
|
||||
### Supported frameworks
|
||||
@ -187,7 +170,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
<!--This table is updated automatically from the auto modules with _make fix-copies_. Do not update manually!-->
|
||||
|
||||
| Model | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
|
||||
|:---------------------------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:|
|
||||
| ALBERT | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| BART | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| BEiT | ❌ | ❌ | ✅ | ❌ | ✅ |
|
||||
@ -199,23 +182,29 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| BlenderbotSmall | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| CamemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| Canine | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| CLIP | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| ConvNext | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Data2VecAudio | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Data2VecText | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| DeBERTa | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| DeBERTa-v2 | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Decision Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| DeiT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| DETR | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| DistilBERT | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| DPR | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| DPT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| FNet | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| Funnel Transformer | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| GLPN | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| GPT Neo | ❌ | ❌ | ✅ | ❌ | ✅ |
|
||||
| GPT-J | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| Hubert | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
| I-BERT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ImageGPT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
@ -227,45 +216,59 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| LXMERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Marian | ✅ | ❌ | ✅ | ✅ | ✅ |
|
||||
| MaskFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| mBART | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| MegatronBert | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| MobileBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| MPNet | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| mT5 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Nystromformer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| OpenAI GPT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| OpenAI GPT-2 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Pegasus | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Perceiver | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| PLBart | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| PoolFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| QDQBert | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| RAG | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Realm | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| Reformer | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| RemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| ResNet | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| RetriBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| RoFormer | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| SegFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| SEW | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| SEW-D | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Speech Encoder decoder | ❌ | ❌ | ✅ | ❌ | ✅ |
|
||||
| Speech2Text | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Speech2Text2 | ✅ | ❌ | ❌ | ❌ | ❌ |
|
||||
| Splinter | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| SqueezeBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| Swin | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| TAPAS | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| TrOCR | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| UniSpeech | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| UniSpeechSat | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| VAN | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ViLT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Vision Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| VisionTextDualEncoder | ❌ | ❌ | ✅ | ❌ | ✅ |
|
||||
| VisualBert | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ViT | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| ViTMAE | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ |
|
||||
| WavLM | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| XGLM | ✅ | ✅ | ✅ | ❌ | ✅ |
|
||||
| XLM | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| XLM-RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| XLM-RoBERTa-XL | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| XLMProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| XLNet | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| YOSO | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
|
||||
<!-- End table-->
|
||||
docs/source/en/installation.mdx (new file, 235 lines)
@ -0,0 +1,235 @@
<!---
|
||||
Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
# Installation
|
||||
|
||||
Install 🤗 Transformers for whichever deep learning library you're working with, set up your cache, and optionally configure 🤗 Transformers to run offline.
|
||||
|
||||
🤗 Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:
|
||||
|
||||
* [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
|
||||
* [TensorFlow 2.0](https://www.tensorflow.org/install/pip) installation instructions.
|
||||
* [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.
|
||||
|
||||
## Install with pip
|
||||
|
||||
You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects, and avoid compatibility issues between dependencies.
|
||||
|
||||
Start by creating a virtual environment in your project directory:
|
||||
|
||||
```bash
|
||||
python -m venv .env
|
||||
```
|
||||
|
||||
Activate the virtual environment:
|
||||
|
||||
```bash
|
||||
source .env/bin/activate
|
||||
```
|
||||
|
||||
Now you're ready to install 🤗 Transformers with the following command:
|
||||
|
||||
```bash
|
||||
pip install transformers
|
||||
```
|
||||
|
||||
For CPU support only, you can conveniently install 🤗 Transformers and a deep learning library in one line. For example, install 🤗 Transformers and PyTorch with:
|
||||
|
||||
```bash
|
||||
pip install transformers[torch]
|
||||
```
|
||||
|
||||
🤗 Transformers and TensorFlow 2.0:
|
||||
|
||||
```bash
|
||||
pip install transformers[tf-cpu]
|
||||
```
|
||||
|
||||
🤗 Transformers and Flax:
|
||||
|
||||
```bash
|
||||
pip install transformers[flax]
|
||||
```
|
||||
|
||||
Finally, check if 🤗 Transformers has been properly installed by running the following command. It will download a pretrained model:
|
||||
|
||||
```bash
|
||||
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"
|
||||
```
|
||||
|
||||
The command will then print the label and score:
|
||||
|
||||
```bash
|
||||
[{'label': 'POSITIVE', 'score': 0.9998704791069031}]
|
||||
```
|
||||
|
||||
## Install from source
|
||||
|
||||
Install 🤗 Transformers from source with the following command:
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/transformers
|
||||
```
|
||||
|
||||
This command installs the bleeding edge `main` version rather than the latest `stable` version. The `main` version is useful for staying up-to-date with the latest developments, for instance if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!
|
||||
|
||||
Check if 🤗 Transformers has been properly installed by running the following command:
|
||||
|
||||
```bash
|
||||
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"
|
||||
```
|
||||
|
||||
## Editable install
|
||||
|
||||
You will need an editable install if you'd like to:
|
||||
|
||||
* Use the `main` version of the source code.
|
||||
* Contribute to 🤗 Transformers and need to test changes in the code.
|
||||
|
||||
Clone the repository and install 🤗 Transformers with the following commands:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/huggingface/transformers.git
|
||||
cd transformers
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
These commands link the folder you cloned the repository into with your Python library paths. Python will now look inside the folder you cloned to in addition to the normal library paths. For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the folder you cloned to: `~/transformers/`.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
You must keep the `transformers` folder if you want to keep using the library.
|
||||
|
||||
</Tip>
|
||||
|
||||
Now you can easily update your clone to the latest version of 🤗 Transformers with the following command:
|
||||
|
||||
```bash
|
||||
cd ~/transformers/
|
||||
git pull
|
||||
```
|
||||
|
||||
Your Python environment will find the `main` version of 🤗 Transformers on the next run.
|
||||
|
||||
## Install with conda
|
||||
|
||||
Install from the conda channel `huggingface`:
|
||||
|
||||
```bash
|
||||
conda install -c huggingface transformers
|
||||
```
|
||||
|
||||
## Cache setup
|
||||
|
||||
Pretrained models are downloaded and locally cached at: `~/.cache/huggingface/transformers/`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is given by `C:\Users\username\.cache\huggingface\transformers`. You can change the shell environment variables shown below - in order of priority - to specify a different cache directory:
|
||||
|
||||
1. Shell environment variable (default): `TRANSFORMERS_CACHE`.
|
||||
2. Shell environment variable: `HF_HOME` + `transformers/`.
|
||||
3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface/transformers`.
|
||||
|
||||
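If you just want to redirect the cache for one project, a minimal sketch (the path below is only an example, not a recommendation) is to set the highest-priority variable before 🤗 Transformers is imported:

```py
import os

# TRANSFORMERS_CACHE is read when transformers is imported, so set it first.
os.environ["TRANSFORMERS_CACHE"] = "/data/hf-cache"  # example path, adjust to your setup

from transformers import AutoConfig

# The downloaded files now land under /data/hf-cache instead of the default directory.
config = AutoConfig.from_pretrained("bert-base-uncased")
```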
<Tip>
|
||||
|
||||
🤗 Transformers will use the shell environment variables `PYTORCH_TRANSFORMERS_CACHE` or `PYTORCH_PRETRAINED_BERT_CACHE` if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable `TRANSFORMERS_CACHE`.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Offline mode
|
||||
|
||||
🤗 Transformers is able to run in a firewalled or offline environment by only using local files. Set the environment variable `TRANSFORMERS_OFFLINE=1` to enable this behavior.
|
||||
|
||||
<Tip>
|
||||
|
||||
Add [🤗 Datasets](https://huggingface.co/docs/datasets/) to your offline training workflow by setting the environment variable `HF_DATASETS_OFFLINE=1`.
|
||||
|
||||
</Tip>
|
||||
|
||||
For example, on a machine with normal network access to external servers, you would typically run a program with the following command:
|
||||
|
||||
```bash
|
||||
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
|
||||
```
|
||||
|
||||
Run this same program in an offline instance with:
|
||||
|
||||
```bash
|
||||
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
|
||||
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
|
||||
```
|
||||
|
||||
The script should now run without hanging or waiting to time out because it knows it should only look for local files.
|
||||
|
||||
### Fetch models and tokenizers to use offline
|
||||
|
||||
Another option for using 🤗 Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:
|
||||
|
||||
* Download a file through the user interface on the [Model Hub](https://huggingface.co/models) by clicking on the ↓ icon.
|
||||
|
||||

|
||||
|
||||
* Use the [`PreTrainedModel.from_pretrained`] and [`PreTrainedModel.save_pretrained`] workflow:
|
||||
|
||||
1. Download your files ahead of time with [`PreTrainedModel.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B")
|
||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")
|
||||
```
|
||||
|
||||
2. Save your files to a specified directory with [`PreTrainedModel.save_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> tokenizer.save_pretrained("./your/path/bigscience_t0")
|
||||
>>> model.save_pretrained("./your/path/bigscience_t0")
|
||||
```
|
||||
|
||||
3. Now when you're offline, reload your files with [`PreTrainedModel.from_pretrained`] from the specified directory:
|
||||
|
||||
```py
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0")
|
||||
>>> model = AutoModel.from_pretrained("./your/path/bigscience_t0")
|
||||
```
|
||||
|
||||
* Programmatically download files with the [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) library:
|
||||
|
||||
1. Install the `huggingface_hub` library in your virtual environment:
|
||||
|
||||
```bash
|
||||
python -m pip install huggingface_hub
|
||||
```
|
||||
|
||||
2. Use the [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) function to download a file to a specific path. For example, the following command downloads the `config.json` file from the [T0](https://huggingface.co/bigscience/T0_3B) model to your desired path:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import hf_hub_download
|
||||
|
||||
>>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0")
|
||||
```
|
||||
|
||||
Once your file is downloaded and locally cached, specify its local path to load and use it:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoConfig
|
||||
|
||||
>>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json")
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
See the [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) section for more details on downloading files stored on the Hub.
|
||||
|
||||
</Tip>
|
||||
docs/source/en/internal/file_utils.mdx (new file, 46 lines)
@ -0,0 +1,46 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# General Utilities
|
||||
|
||||
This page lists all of Transformers' general utility functions that are found in the file `utils.py`.
|
||||
|
||||
Most of those are only useful if you are studying the general code in the library.
|
||||
|
||||
|
||||
## Enums and namedtuples
|
||||
|
||||
[[autodoc]] utils.ExplicitEnum
|
||||
|
||||
[[autodoc]] utils.PaddingStrategy
|
||||
|
||||
[[autodoc]] utils.TensorType
|
||||
|
||||
## Special Decorators
|
||||
|
||||
[[autodoc]] utils.add_start_docstrings
|
||||
|
||||
[[autodoc]] utils.add_start_docstrings_to_model_forward
|
||||
|
||||
[[autodoc]] utils.add_end_docstrings
|
||||
|
||||
[[autodoc]] utils.add_code_sample_docstrings
|
||||
|
||||
[[autodoc]] utils.replace_return_docstrings
|
||||
|
||||
## Special Properties
|
||||
|
||||
[[autodoc]] utils.cached_property
|
||||
|
||||
## Other Utilities
|
||||
|
||||
[[autodoc]] utils._LazyModule
|
||||
docs/source/en/internal/generation_utils.mdx (new file, 254 lines)
@ -0,0 +1,254 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Utilities for Generation
|
||||
|
||||
This page lists all the utility functions used by [`~generation_utils.GenerationMixin.generate`],
|
||||
[`~generation_utils.GenerationMixin.greedy_search`],
|
||||
[`~generation_utils.GenerationMixin.sample`],
|
||||
[`~generation_utils.GenerationMixin.beam_search`],
|
||||
[`~generation_utils.GenerationMixin.beam_sample`],
|
||||
[`~generation_utils.GenerationMixin.group_beam_search`], and
|
||||
[`~generation_utils.GenerationMixin.constrained_beam_search`].
|
||||
|
||||
Most of those are only useful if you are studying the code of the generate methods in the library.
|
||||
|
||||
## Generate Outputs
|
||||
|
||||
The output of [`~generation_utils.GenerationMixin.generate`] is an instance of a subclass of
|
||||
[`~utils.ModelOutput`]. This output is a data structure containing all the information returned
|
||||
by [`~generation_utils.GenerationMixin.generate`], but that can also be used as a tuple or a dictionary.
|
||||
|
||||
Here's an example:
|
||||
|
||||
```python
|
||||
from transformers import GPT2Tokenizer, GPT2LMHeadModel
|
||||
|
||||
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
||||
model = GPT2LMHeadModel.from_pretrained("gpt2")
|
||||
|
||||
inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
|
||||
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
|
||||
```
|
||||
|
||||
The `generation_output` object is a [`~generation_utils.GreedySearchDecoderOnlyOutput`]. As we can see in the documentation of that class below, this means it has the following attributes:
|
||||
|
||||
- `sequences`: the generated sequences of tokens
|
||||
- `scores` (optional): the prediction scores of the language modelling head, for each generation step
|
||||
- `hidden_states` (optional): the hidden states of the model, for each generation step
|
||||
- `attentions` (optional): the attention weights of the model, for each generation step
|
||||
|
||||
Here we have the `scores` since we passed along `output_scores=True`, but we don't have `hidden_states` and
|
||||
`attentions` because we didn't pass `output_hidden_states=True` or `output_attentions=True`.
|
||||
|
||||
You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you
|
||||
will get `None`. Here for instance `generation_output.scores` are all the generated prediction scores of the
|
||||
language modeling head, and `generation_output.attentions` is `None`.
|
||||
|
||||
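Continuing the example above, a quick way to check this (purely illustrative) is to look at the attributes directly:

```python
# `scores` is a tuple with one entry per generated token, each of shape (batch_size, vocab_size).
print(len(generation_output.scores), generation_output.scores[0].shape)

# `attentions` was not requested, so the attribute is None.
print(generation_output.attentions)
```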
When using our `generation_output` object as a tuple, it only keeps the attributes that don't have `None` values.
|
||||
Here, for instance, it has two elements, `sequences` then `scores`, so
|
||||
|
||||
```python
|
||||
generation_output[:2]
|
||||
```
|
||||
|
||||
will return the tuple `(generation_output.sequences, generation_output.scores)` for instance.
|
||||
|
||||
When using our `generation_output` object as a dictionary, it only keeps the attributes that don't have `None`
|
||||
values. Here, for instance, it has two keys that are `sequences` and `scores`.
|
||||
|
||||
We document here all output types.
|
||||
|
||||
|
||||
### GreedySearchOutput
|
||||
|
||||
[[autodoc]] generation_utils.GreedySearchDecoderOnlyOutput
|
||||
|
||||
[[autodoc]] generation_utils.GreedySearchEncoderDecoderOutput
|
||||
|
||||
[[autodoc]] generation_flax_utils.FlaxGreedySearchOutput
|
||||
|
||||
### SampleOutput
|
||||
|
||||
[[autodoc]] generation_utils.SampleDecoderOnlyOutput
|
||||
|
||||
[[autodoc]] generation_utils.SampleEncoderDecoderOutput
|
||||
|
||||
[[autodoc]] generation_flax_utils.FlaxSampleOutput
|
||||
|
||||
### BeamSearchOutput
|
||||
|
||||
[[autodoc]] generation_utils.BeamSearchDecoderOnlyOutput
|
||||
|
||||
[[autodoc]] generation_utils.BeamSearchEncoderDecoderOutput
|
||||
|
||||
### BeamSampleOutput
|
||||
|
||||
[[autodoc]] generation_utils.BeamSampleDecoderOnlyOutput
|
||||
|
||||
[[autodoc]] generation_utils.BeamSampleEncoderDecoderOutput
|
||||
|
||||
## LogitsProcessor
|
||||
|
||||
A [`LogitsProcessor`] can be used to modify the prediction scores of a language model head for
|
||||
generation.
|
||||
|
||||
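As a rough sketch of how these objects behave on their own (the checkpoint, prompt, and minimum length are arbitrary choices here, not part of the API), a `LogitsProcessorList` is simply called on the running `input_ids` and the next-token scores; `generate` builds such a list internally from its arguments:

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer, LogitsProcessorList, MinLengthLogitsProcessor

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

inputs = tokenizer("Hello, my dog is", return_tensors="pt")
next_token_scores = model(**inputs).logits[:, -1, :]

# Push the EOS score to -inf until at least 10 tokens have been generated.
processors = LogitsProcessorList(
    [MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id)]
)
filtered_scores = processors(inputs["input_ids"], next_token_scores)
```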
[[autodoc]] LogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] LogitsProcessorList
|
||||
- __call__
|
||||
|
||||
[[autodoc]] LogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] MinLengthLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TemperatureLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] RepetitionPenaltyLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TopPLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TopKLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] NoRepeatNGramLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] NoBadWordsLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] PrefixConstrainedLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] HammingDiversityLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] ForcedBOSTokenLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] ForcedEOSTokenLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] InfNanRemoveLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFLogitsProcessorList
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFTemperatureLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFTopPLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFTopKLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFMinLengthLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFNoBadWordsLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFNoRepeatNGramLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] TFRepetitionPenaltyLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxLogitsProcessorList
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxTemperatureLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxTopPLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxTopKLogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxForcedBOSTokenLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxForcedEOSTokenLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] FlaxMinLengthLogitsProcessor
|
||||
- __call__
|
||||
|
||||
## StoppingCriteria
|
||||
|
||||
A [`StoppingCriteria`] can be used to change when to stop generation (other than EOS token).
|
||||
|
||||
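A minimal sketch (the checkpoint, prompt, and two-second budget are arbitrary, and this assumes your version of [`~generation_utils.GenerationMixin.generate`] accepts a `stopping_criteria` argument):

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer, MaxTimeCriteria, StoppingCriteriaList

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
inputs = tokenizer("Hello, my dog is", return_tensors="pt")

# Stop generating after roughly two seconds of wall-clock time,
# on top of the usual EOS / max_length conditions.
criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
outputs = model.generate(**inputs, stopping_criteria=criteria, max_length=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```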
[[autodoc]] StoppingCriteria
|
||||
- __call__
|
||||
|
||||
[[autodoc]] StoppingCriteriaList
|
||||
- __call__
|
||||
|
||||
[[autodoc]] MaxLengthCriteria
|
||||
- __call__
|
||||
|
||||
[[autodoc]] MaxTimeCriteria
|
||||
- __call__
|
||||
|
||||
## Constraints
|
||||
|
||||
A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output.
|
||||
|
||||
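A hedged sketch of how this can look with constrained beam search (the checkpoint, forced phrase, and beam count are arbitrary, and this assumes `generate` exposes a `constraints` argument in your version; constraints require beam search, i.e. `num_beams > 1`):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Force the phrase "sehr schön" to appear somewhere in the output.
forced_ids = tokenizer("sehr schön", add_special_tokens=False).input_ids
constraints = [PhrasalConstraint(forced_ids)]

inputs = tokenizer("translate English to German: The weather is very nice.", return_tensors="pt")
outputs = model.generate(**inputs, constraints=constraints, num_beams=5, max_length=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```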
[[autodoc]] Constraint
|
||||
|
||||
[[autodoc]] PhrasalConstraint
|
||||
|
||||
[[autodoc]] DisjunctiveConstraint
|
||||
|
||||
[[autodoc]] ConstraintListState
|
||||
|
||||
## BeamSearch
|
||||
|
||||
[[autodoc]] BeamScorer
|
||||
- process
|
||||
- finalize
|
||||
|
||||
[[autodoc]] BeamSearchScorer
|
||||
- process
|
||||
- finalize
|
||||
|
||||
[[autodoc]] ConstrainedBeamSearchScorer
|
||||
- process
|
||||
- finalize
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] top_k_top_p_filtering
|
||||
|
||||
[[autodoc]] tf_top_k_top_p_filtering
|
||||
docs/source/en/internal/modeling_utils.mdx (new file, 82 lines)
@ -0,0 +1,82 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Custom Layers and Utilities
|
||||
|
||||
This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling.
|
||||
|
||||
Most of those are only useful if you are studying the code of the models in the library.
|
||||
|
||||
|
||||
## Pytorch custom modules
|
||||
|
||||
[[autodoc]] modeling_utils.Conv1D
|
||||
|
||||
[[autodoc]] modeling_utils.PoolerStartLogits
|
||||
- forward
|
||||
|
||||
[[autodoc]] modeling_utils.PoolerEndLogits
|
||||
- forward
|
||||
|
||||
[[autodoc]] modeling_utils.PoolerAnswerClass
|
||||
- forward
|
||||
|
||||
[[autodoc]] modeling_utils.SquadHeadOutput
|
||||
|
||||
[[autodoc]] modeling_utils.SQuADHead
|
||||
- forward
|
||||
|
||||
[[autodoc]] modeling_utils.SequenceSummary
|
||||
- forward
|
||||
|
||||
## PyTorch Helper Functions
|
||||
|
||||
[[autodoc]] apply_chunking_to_forward
|
||||
|
||||
[[autodoc]] modeling_utils.find_pruneable_heads_and_indices
|
||||
|
||||
[[autodoc]] modeling_utils.prune_layer
|
||||
|
||||
[[autodoc]] modeling_utils.prune_conv1d_layer
|
||||
|
||||
[[autodoc]] modeling_utils.prune_linear_layer
|
||||
|
||||
## TensorFlow custom layers
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFConv1D
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFSharedEmbeddings
|
||||
- call
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFSequenceSummary
|
||||
|
||||
## TensorFlow loss functions
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFCausalLanguageModelingLoss
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFMaskedLanguageModelingLoss
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFMultipleChoiceLoss
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFQuestionAnsweringLoss
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFSequenceClassificationLoss
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFTokenClassificationLoss
|
||||
|
||||
## TensorFlow Helper Functions
|
||||
|
||||
[[autodoc]] modeling_tf_utils.get_initializer
|
||||
|
||||
[[autodoc]] modeling_tf_utils.keras_serializable
|
||||
|
||||
[[autodoc]] modeling_tf_utils.shape_list
|
||||
docs/source/en/internal/pipelines_utils.mdx (new file, 40 lines)
@ -0,0 +1,40 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Utilities for pipelines
|
||||
|
||||
This page lists all the utility functions the library provides for pipelines.
|
||||
|
||||
Most of those are only useful if you are studying the code of the models in the library.
|
||||
|
||||
|
||||
## Argument handling
|
||||
|
||||
[[autodoc]] pipelines.ArgumentHandler
|
||||
|
||||
[[autodoc]] pipelines.ZeroShotClassificationArgumentHandler
|
||||
|
||||
[[autodoc]] pipelines.QuestionAnsweringArgumentHandler
|
||||
|
||||
## Data format
|
||||
|
||||
[[autodoc]] pipelines.PipelineDataFormat
|
||||
|
||||
[[autodoc]] pipelines.CsvPipelineDataFormat
|
||||
|
||||
[[autodoc]] pipelines.JsonPipelineDataFormat
|
||||
|
||||
[[autodoc]] pipelines.PipedPipelineDataFormat
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] pipelines.PipelineException
|
||||
docs/source/en/internal/tokenization_utils.mdx (new file, 38 lines)
@ -0,0 +1,38 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Utilities for Tokenizers
|
||||
|
||||
This page lists all the utility functions used by the tokenizers, mainly the class
|
||||
[`~tokenization_utils_base.PreTrainedTokenizerBase`] that implements the common methods between
|
||||
[`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] and the mixin
|
||||
[`~tokenization_utils_base.SpecialTokensMixin`].
|
||||
|
||||
Most of those are only useful if you are studying the code of the tokenizers in the library.
|
||||
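For orientation, a small example of those shared methods in action (the checkpoint is arbitrary): padding, truncation and `__call__` come from [`~tokenization_utils_base.PreTrainedTokenizerBase`], while the special-token attributes come from [`~tokenization_utils_base.SpecialTokensMixin`]:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# __call__, padding and truncation are implemented once in PreTrainedTokenizerBase
# and shared by both slow and fast tokenizers.
encoded = tokenizer(["Hello world", "A somewhat longer sentence"], padding=True, truncation=True, return_tensors="pt")
print(encoded["input_ids"].shape)

# Special-token handling comes from SpecialTokensMixin.
print(tokenizer.cls_token, tokenizer.sep_token)
```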
|
||||
## PreTrainedTokenizerBase
|
||||
|
||||
[[autodoc]] tokenization_utils_base.PreTrainedTokenizerBase
|
||||
- __call__
|
||||
- all
|
||||
|
||||
## SpecialTokensMixin
|
||||
|
||||
[[autodoc]] tokenization_utils_base.SpecialTokensMixin
|
||||
|
||||
## Enums and namedtuples
|
||||
|
||||
[[autodoc]] tokenization_utils_base.TruncationStrategy
|
||||
|
||||
[[autodoc]] tokenization_utils_base.CharSpan
|
||||
|
||||
[[autodoc]] tokenization_utils_base.TokenSpan
|
||||
docs/source/en/internal/trainer_utils.mdx (new file, 43 lines)
@ -0,0 +1,43 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Utilities for Trainer
|
||||
|
||||
This page lists all the utility functions used by [`Trainer`].
|
||||
|
||||
Most of those are only useful if you are studying the code of the Trainer in the library.
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] EvalPrediction
|
||||
|
||||
[[autodoc]] IntervalStrategy
|
||||
|
||||
[[autodoc]] set_seed
|
||||
|
||||
[[autodoc]] torch_distributed_zero_first
|
||||
|
||||
## Callbacks internals
|
||||
|
||||
[[autodoc]] trainer_callback.CallbackHandler
|
||||
|
||||
## Distributed Evaluation
|
||||
|
||||
[[autodoc]] trainer_pt_utils.DistributedTensorGatherer
|
||||
|
||||
## Trainer Argument Parser
|
||||
|
||||
[[autodoc]] HfArgumentParser
|
||||
|
||||
## Debug Utilities
|
||||
|
||||
[[autodoc]] debug_utils.DebugUnderflowOverflow
|
||||
docs/source/en/main_classes/callback.mdx (new file, 111 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Callbacks
|
||||
|
||||
Callbacks are objects that can customize the behavior of the training loop in the PyTorch
[`Trainer`] (this feature is not yet implemented in TensorFlow). They can inspect the training loop
state (for progress reporting, logging on TensorBoard or other ML platforms...) and take decisions (like early
stopping).
|
||||
|
||||
Callbacks are "read only" pieces of code, apart from the [`TrainerControl`] object they return, they
|
||||
cannot change anything in the training loop. For customizations that require changes in the training loop, you should
|
||||
subclass [`Trainer`] and override the methods you need (see [trainer](trainer) for examples).
|
||||
|
||||
By default a [`Trainer`] will use the following callbacks:
|
||||
|
||||
- [`DefaultFlowCallback`] which handles the default behavior for logging, saving and evaluation.
|
||||
- [`PrinterCallback`] or [`ProgressCallback`] to display progress and print the
|
||||
logs (the first one is used if you deactivate tqdm through the [`TrainingArguments`], otherwise
|
||||
it's the second one).
|
||||
- [`~integrations.TensorBoardCallback`] if tensorboard is accessible (either through PyTorch >= 1.4
|
||||
or tensorboardX).
|
||||
- [`~integrations.WandbCallback`] if [wandb](https://www.wandb.com/) is installed.
|
||||
- [`~integrations.CometCallback`] if [comet_ml](https://www.comet.ml/site/) is installed.
|
||||
- [`~integrations.MLflowCallback`] if [mlflow](https://www.mlflow.org/) is installed.
|
||||
- [`~integrations.AzureMLCallback`] if [azureml-sdk](https://pypi.org/project/azureml-sdk/) is
|
||||
installed.
|
||||
- [`~integrations.CodeCarbonCallback`] if [codecarbon](https://pypi.org/project/codecarbon/) is
|
||||
installed.
|
||||
|
||||
The main class that implements callbacks is [`TrainerCallback`]. It gets the
|
||||
[`TrainingArguments`] used to instantiate the [`Trainer`], can access that
|
||||
Trainer's internal state via [`TrainerState`], and can take some actions on the training loop via
|
||||
[`TrainerControl`].
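
As an illustration, here is a minimal sketch (not a callback shipped with the library) that uses the state and control objects to stop training once the logged loss drops below a hypothetical threshold:

```python
from transformers import TrainerCallback


class StopOnLowLossCallback(TrainerCallback):
    "A sketch of a callback that stops training when the logged loss gets low enough."

    def __init__(self, loss_threshold=0.1):  # hypothetical threshold
        self.loss_threshold = loss_threshold

    def on_log(self, args, state, control, logs=None, **kwargs):
        # `state` is the TrainerState and `control` the TrainerControl described above
        if logs is not None and logs.get("loss", float("inf")) < self.loss_threshold:
            print(f"Stopping at step {state.global_step}: loss below {self.loss_threshold}")
            control.should_training_stop = True
```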
|
||||
|
||||
|
||||
## Available Callbacks
|
||||
|
||||
Here is the list of the available [`TrainerCallback`] in the library:
|
||||
|
||||
[[autodoc]] integrations.CometCallback
|
||||
- setup
|
||||
|
||||
[[autodoc]] DefaultFlowCallback
|
||||
|
||||
[[autodoc]] PrinterCallback
|
||||
|
||||
[[autodoc]] ProgressCallback
|
||||
|
||||
[[autodoc]] EarlyStoppingCallback
|
||||
|
||||
[[autodoc]] integrations.TensorBoardCallback
|
||||
|
||||
[[autodoc]] integrations.WandbCallback
|
||||
- setup
|
||||
|
||||
[[autodoc]] integrations.MLflowCallback
|
||||
- setup
|
||||
|
||||
[[autodoc]] integrations.AzureMLCallback
|
||||
|
||||
[[autodoc]] integrations.CodeCarbonCallback
|
||||
|
||||
## TrainerCallback
|
||||
|
||||
[[autodoc]] TrainerCallback
|
||||
|
||||
Here is an example of how to register a custom callback with the PyTorch [`Trainer`]:
|
||||
|
||||
```python
|
||||
class MyCallback(TrainerCallback):
|
||||
"A callback that prints a message at the beginning of training"
|
||||
|
||||
def on_train_begin(self, args, state, control, **kwargs):
|
||||
print("Starting training")
|
||||
|
||||
|
||||
trainer = Trainer(
|
||||
model,
|
||||
args,
|
||||
train_dataset=train_dataset,
|
||||
eval_dataset=eval_dataset,
|
||||
callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback())
|
||||
)
|
||||
```
|
||||
|
||||
Another way to register a callback is to call `trainer.add_callback()` as follows:
|
||||
|
||||
```python
|
||||
trainer = Trainer(...)
|
||||
trainer.add_callback(MyCallback)
|
||||
# Alternatively, we can pass an instance of the callback class
|
||||
trainer.add_callback(MyCallback())
|
||||
```
|
||||
|
||||
## TrainerState
|
||||
|
||||
[[autodoc]] TrainerState
|
||||
|
||||
## TrainerControl
|
||||
|
||||
[[autodoc]] TrainerControl
|
||||
docs/source/en/main_classes/configuration.mdx (new file, 28 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Configuration
|
||||
|
||||
The base class [`PretrainedConfig`] implements the common methods for loading/saving a configuration
|
||||
either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded
|
||||
from HuggingFace's AWS S3 repository).
|
||||
|
||||
Each derived config class implements model-specific attributes. Common attributes present in all config classes are:
|
||||
`hidden_size`, `num_attention_heads`, and `num_hidden_layers`. Text models further implement:
|
||||
`vocab_size`.
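
For instance, here is a minimal sketch of loading a configuration from the Hub, overriding one of these attributes and saving the result locally (the checkpoint name and directory are just examples):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("bert-base-uncased")
print(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.vocab_size)

# Override an attribute and save the configuration to a local directory
config.num_hidden_layers = 6
config.save_pretrained("./my-bert-config")

# It can later be reloaded from that directory
config = AutoConfig.from_pretrained("./my-bert-config")
```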
|
||||
|
||||
|
||||
## PretrainedConfig
|
||||
|
||||
[[autodoc]] PretrainedConfig
|
||||
- push_to_hub
|
||||
- all
|
||||
docs/source/en/main_classes/data_collator.mdx (new file, 64 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Data Collator
|
||||
|
||||
Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of
|
||||
the same type as the elements of `train_dataset` or `eval_dataset`.
|
||||
|
||||
To be able to build batches, data collators may apply some processing (like padding). Some of them (like
|
||||
[`DataCollatorForLanguageModeling`]) also apply some random data augmentation (like random masking)
|
||||
on the formed batch.
|
||||
|
||||
Examples of use can be found in the [example scripts](../examples) or [example notebooks](../notebooks).
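
As a quick illustration, here is a minimal sketch of [`DataCollatorWithPadding`] turning a few tokenized examples of different lengths into a single padded batch (the sentences are made up for the example):

```python
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="pt")

# Tokenize two sentences of different lengths, without padding them yet
features = [tokenizer("Hello world"), tokenizer("A slightly longer example sentence")]

batch = data_collator(features)
print(batch["input_ids"].shape)  # both sequences are padded to the same length
```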
|
||||
|
||||
|
||||
## Default data collator
|
||||
|
||||
[[autodoc]] data.data_collator.default_data_collator
|
||||
|
||||
## DefaultDataCollator
|
||||
|
||||
[[autodoc]] data.data_collator.DefaultDataCollator
|
||||
|
||||
## DataCollatorWithPadding
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorWithPadding
|
||||
|
||||
## DataCollatorForTokenClassification
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorForTokenClassification
|
||||
|
||||
## DataCollatorForSeq2Seq
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorForSeq2Seq
|
||||
|
||||
## DataCollatorForLanguageModeling
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorForLanguageModeling
|
||||
- numpy_mask_tokens
|
||||
- tf_mask_tokens
|
||||
- torch_mask_tokens
|
||||
|
||||
## DataCollatorForWholeWordMask
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorForWholeWordMask
|
||||
- numpy_mask_tokens
|
||||
- tf_mask_tokens
|
||||
- torch_mask_tokens
|
||||
|
||||
## DataCollatorForPermutationLanguageModeling
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorForPermutationLanguageModeling
|
||||
- numpy_mask_tokens
|
||||
- tf_mask_tokens
|
||||
- torch_mask_tokens
|
||||
docs/source/en/main_classes/deepspeed.mdx (new file, 2080 lines; diff too large to show)
docs/source/en/main_classes/feature_extractor.mdx (new file, 38 lines)
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Feature Extractor
|
||||
|
||||
A feature extractor is in charge of preparing input features for a multi-modal model. This includes feature extraction
|
||||
from sequences, *e.g.*, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images
|
||||
*e.g.*, cropping image files, but also padding, normalization, and conversion to NumPy, PyTorch, and TensorFlow
|
||||
tensors.
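
For example, a feature extractor can be loaded from a pretrained checkpoint and applied to raw data; a minimal sketch for an audio model (the silent "audio" is only a stand-in for real data) could look like this:

```python
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

raw_audio = [0.0] * 16000  # one second of silence at 16 kHz, standing in for a decoded audio file
inputs = feature_extractor(raw_audio, sampling_rate=16000, return_tensors="pt")
print(inputs["input_values"].shape)
```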
|
||||
|
||||
|
||||
## FeatureExtractionMixin
|
||||
|
||||
[[autodoc]] feature_extraction_utils.FeatureExtractionMixin
|
||||
- from_pretrained
|
||||
- save_pretrained
|
||||
|
||||
## SequenceFeatureExtractor
|
||||
|
||||
[[autodoc]] SequenceFeatureExtractor
|
||||
- pad
|
||||
|
||||
## BatchFeature
|
||||
|
||||
[[autodoc]] BatchFeature
|
||||
|
||||
## ImageFeatureExtractionMixin
|
||||
|
||||
[[autodoc]] image_utils.ImageFeatureExtractionMixin
|
||||
docs/source/en/main_classes/keras_callbacks.mdx (new file, 24 lines)
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Keras callbacks
|
||||
|
||||
When training a Transformers model with Keras, there are some library-specific callbacks available to automate common
|
||||
tasks:
|
||||
|
||||
## KerasMetricCallback
|
||||
|
||||
[[autodoc]] KerasMetricCallback
|
||||
|
||||
## PushToHubCallback
|
||||
|
||||
[[autodoc]] PushToHubCallback
|
||||
docs/source/en/main_classes/logging.mdx (new file, 110 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Logging
|
||||
|
||||
🤗 Transformers has a centralized logging system, so that you can set up the verbosity of the library easily.
|
||||
|
||||
Currently the default verbosity of the library is `WARNING`.
|
||||
|
||||
To change the level of verbosity, just use one of the direct setters. For instance, here is how to change the verbosity
|
||||
to the INFO level.
|
||||
|
||||
```python
|
||||
import transformers
|
||||
|
||||
transformers.logging.set_verbosity_info()
|
||||
```
|
||||
|
||||
You can also use the environment variable `TRANSFORMERS_VERBOSITY` to override the default verbosity. You can set it
|
||||
to one of the following: `debug`, `info`, `warning`, `error`, `critical`. For example:
|
||||
|
||||
```bash
|
||||
TRANSFORMERS_VERBOSITY=error ./myprogram.py
|
||||
```
|
||||
|
||||
Additionally, some `warnings` can be disabled by setting the environment variable
|
||||
`TRANSFORMERS_NO_ADVISORY_WARNINGS` to a true value, like *1*. This will disable any warning that is logged using
|
||||
[`logger.warning_advice`]. For example:
|
||||
|
||||
```bash
|
||||
TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py
|
||||
```
|
||||
|
||||
Here is an example of how to use `logging` in a module:
|
||||
|
||||
```python
|
||||
from transformers.utils import logging
|
||||
|
||||
logging.set_verbosity_info()
|
||||
logger = logging.get_logger(__name__)
|
||||
logger.info("INFO")
|
||||
logger.warning("WARN")
|
||||
```
|
||||
|
||||
Above, a `logger` instance is created from `logging.get_logger(__name__)`. If you want to use `logging` in a script, you shouldn't pass `__name__` to `logging.get_logger`. For example:
|
||||
|
||||
```python
|
||||
from transformers.utils import logging
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.set_verbosity_info()
|
||||
# leave it empty or use a string
|
||||
logger = logging.get_logger()
|
||||
logger.info("INFO")
|
||||
logger.warning("WARN")
|
||||
```
|
||||
|
||||
All the methods of this logging module are documented below; the main ones are
[`logging.get_verbosity`] to get the current level of verbosity in the logger and
[`logging.set_verbosity`] to set the verbosity to the level of your choice. In order (from the least
verbose to the most verbose), those levels (with their corresponding int values in parentheses) are:
|
||||
|
||||
- `transformers.logging.CRITICAL` or `transformers.logging.FATAL` (int value, 50): only report the most
|
||||
critical errors.
|
||||
- `transformers.logging.ERROR` (int value, 40): only report errors.
|
||||
- `transformers.logging.WARNING` or `transformers.logging.WARN` (int value, 30): only reports errors and
warnings. This is the default level used by the library.
|
||||
- `transformers.logging.INFO` (int value, 20): reports errors, warnings and basic information.
- `transformers.logging.DEBUG` (int value, 10): reports all information.
|
||||
|
||||
By default, `tqdm` progress bars will be displayed during model download. [`logging.disable_progress_bar`] and [`logging.enable_progress_bar`] can be used to suppress or unsuppress this behavior.
|
||||
|
||||
## Base setters
|
||||
|
||||
[[autodoc]] logging.set_verbosity_error
|
||||
|
||||
[[autodoc]] logging.set_verbosity_warning
|
||||
|
||||
[[autodoc]] logging.set_verbosity_info
|
||||
|
||||
[[autodoc]] logging.set_verbosity_debug
|
||||
|
||||
## Other functions
|
||||
|
||||
[[autodoc]] logging.get_verbosity
|
||||
|
||||
[[autodoc]] logging.set_verbosity
|
||||
|
||||
[[autodoc]] logging.get_logger
|
||||
|
||||
[[autodoc]] logging.enable_default_handler
|
||||
|
||||
[[autodoc]] logging.disable_default_handler
|
||||
|
||||
[[autodoc]] logging.enable_explicit_format
|
||||
|
||||
[[autodoc]] logging.reset_format
|
||||
|
||||
[[autodoc]] logging.enable_progress_bar
|
||||
|
||||
[[autodoc]] logging.disable_progress_bar
|
||||
docs/source/en/main_classes/model.mdx (new file, 91 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Models
|
||||
|
||||
The base classes [`PreTrainedModel`], [`TFPreTrainedModel`], and
|
||||
[`FlaxPreTrainedModel`] implement the common methods for loading/saving a model either from a local
|
||||
file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace's AWS
|
||||
S3 repository).
|
||||
|
||||
[`PreTrainedModel`] and [`TFPreTrainedModel`] also implement a few methods which
|
||||
are common among all the models to:
|
||||
|
||||
- resize the input token embeddings when new tokens are added to the vocabulary
|
||||
- prune the attention heads of the model.
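
For instance, a minimal sketch of resizing the token embeddings after adding a new token (GPT-2 is just an arbitrary example):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Add a new token to the vocabulary and resize the embedding matrix accordingly
tokenizer.add_tokens(["<new_token>"])
model.resize_token_embeddings(len(tokenizer))
```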
|
||||
|
||||
The other methods that are common to each model are defined in [`~modeling_utils.ModuleUtilsMixin`]
|
||||
(for the PyTorch models) and [`~modeling_tf_utils.TFModuleUtilsMixin`] (for the TensorFlow models) or
|
||||
for text generation, [`~generation_utils.GenerationMixin`] (for the PyTorch models),
|
||||
[`~generation_tf_utils.TFGenerationMixin`] (for the TensorFlow models) and
|
||||
[`~generation_flax_utils.FlaxGenerationMixin`] (for the Flax/JAX models).
|
||||
|
||||
|
||||
## PreTrainedModel
|
||||
|
||||
[[autodoc]] PreTrainedModel
|
||||
- push_to_hub
|
||||
- all
|
||||
|
||||
<a id='from_pretrained-torch-dtype'></a>
|
||||
|
||||
### Model Instantiation dtype
|
||||
|
||||
Under PyTorch a model normally gets instantiated with `torch.float32` format. This can be an issue if one tries to
load a model whose weights are in fp16, since it'd require twice as much memory. To overcome this limitation, you can
either explicitly pass the desired `dtype` using the `torch_dtype` argument:
|
||||
|
||||
```python
|
||||
import torch

from transformers import T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
or, if you want the model to always load in the most optimal memory pattern, you can use the special value `"auto"`,
|
||||
and then `dtype` will be automatically derived from the model's weights:
|
||||
|
||||
```python
|
||||
model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto")
|
||||
```
|
||||
|
||||
Models instantiated from scratch can also be told which `dtype` to use with:
|
||||
|
||||
```python
|
||||
config = T5Config.from_pretrained("t5")
|
||||
model = AutoModel.from_config(config)
|
||||
```
|
||||
|
||||
Due to Pytorch design, this functionality is only available for floating dtypes.
|
||||
|
||||
|
||||
|
||||
## ModuleUtilsMixin
|
||||
|
||||
[[autodoc]] modeling_utils.ModuleUtilsMixin
|
||||
|
||||
## TFPreTrainedModel
|
||||
|
||||
[[autodoc]] TFPreTrainedModel
|
||||
- push_to_hub
|
||||
- all
|
||||
|
||||
## TFModelUtilsMixin
|
||||
|
||||
[[autodoc]] modeling_tf_utils.TFModelUtilsMixin
|
||||
|
||||
## FlaxPreTrainedModel
|
||||
|
||||
[[autodoc]] FlaxPreTrainedModel
|
||||
- push_to_hub
|
||||
- all
|
||||
|
||||
## Pushing to the Hub
|
||||
|
||||
[[autodoc]] utils.PushToHubMixin
|
||||
docs/source/en/main_classes/onnx.mdx (new file, 50 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Exporting 🤗 Transformers models to ONNX
|
||||
|
||||
🤗 Transformers provides a `transformers.onnx` package that enables you to
|
||||
convert model checkpoints to an ONNX graph by leveraging configuration objects.
|
||||
|
||||
See the [guide](../serialization) on exporting 🤗 Transformers models for more
|
||||
details.
|
||||
|
||||
## ONNX Configurations
|
||||
|
||||
We provide three abstract classes that you should inherit from, depending on the
|
||||
type of model architecture you wish to export:
|
||||
|
||||
* Encoder-based models inherit from [`~onnx.config.OnnxConfig`]
|
||||
* Decoder-based models inherit from [`~onnx.config.OnnxConfigWithPast`]
|
||||
* Encoder-decoder models inherit from [`~onnx.config.OnnxSeq2SeqConfigWithPast`]
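
For instance, a minimal encoder-only configuration describes which inputs the exported graph expects and which of their axes are dynamic. The sketch below follows the pattern used in the export guide; the class and axis names are illustrative:

```python
from typing import Mapping, OrderedDict

from transformers.onnx import OnnxConfig


class MyEncoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Map each input name to its dynamic axes (here batch size and sequence length)
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )
```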
|
||||
|
||||
### OnnxConfig
|
||||
|
||||
[[autodoc]] onnx.config.OnnxConfig
|
||||
|
||||
### OnnxConfigWithPast
|
||||
|
||||
[[autodoc]] onnx.config.OnnxConfigWithPast
|
||||
|
||||
### OnnxSeq2SeqConfigWithPast
|
||||
|
||||
[[autodoc]] onnx.config.OnnxSeq2SeqConfigWithPast
|
||||
|
||||
## ONNX Features
|
||||
|
||||
Each ONNX configuration is associated with a set of _features_ that enable you
|
||||
to export models for different types of topologies or tasks.
|
||||
|
||||
### FeaturesManager
|
||||
|
||||
[[autodoc]] onnx.features.FeaturesManager
|
||||
|
||||
docs/source/en/main_classes/optimizer_schedules.mdx (new file, 71 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Optimization
|
||||
|
||||
The `.optimization` module provides:
|
||||
|
||||
- an optimizer with weight decay fixed that can be used to fine-tune models, and
- several schedules in the form of schedule objects that inherit from `_LRSchedule`, and
- a gradient accumulation class to accumulate the gradients of multiple batches
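
As a quick illustration, here is a minimal sketch pairing the `AdamW` optimizer with a linear warmup schedule (the model and step counts are placeholders):

```python
from transformers import AdamW, AutoModelForSequenceClassification, get_linear_schedule_with_warmup

model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)

# In the training loop, after each optimizer step:
# optimizer.step(); scheduler.step(); optimizer.zero_grad()
```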
|
||||
|
||||
## AdamW (PyTorch)
|
||||
|
||||
[[autodoc]] AdamW
|
||||
|
||||
## AdaFactor (PyTorch)
|
||||
|
||||
[[autodoc]] Adafactor
|
||||
|
||||
## AdamWeightDecay (TensorFlow)
|
||||
|
||||
[[autodoc]] AdamWeightDecay
|
||||
|
||||
[[autodoc]] create_optimizer
|
||||
|
||||
## Schedules
|
||||
|
||||
### Learning Rate Schedules (Pytorch)
|
||||
|
||||
[[autodoc]] SchedulerType
|
||||
|
||||
[[autodoc]] get_scheduler
|
||||
|
||||
[[autodoc]] get_constant_schedule
|
||||
|
||||
[[autodoc]] get_constant_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png"/>
|
||||
|
||||
[[autodoc]] get_cosine_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png"/>
|
||||
|
||||
[[autodoc]] get_cosine_with_hard_restarts_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png"/>
|
||||
|
||||
[[autodoc]] get_linear_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png"/>
|
||||
|
||||
[[autodoc]] get_polynomial_decay_schedule_with_warmup
|
||||
|
||||
### Warmup (TensorFlow)
|
||||
|
||||
[[autodoc]] WarmUp
|
||||
|
||||
## Gradient Strategies
|
||||
|
||||
### GradientAccumulator (TensorFlow)
|
||||
|
||||
[[autodoc]] GradientAccumulator
|
||||
docs/source/en/main_classes/output.mdx (new file, 269 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Model outputs
|
||||
|
||||
All models have outputs that are instances of subclasses of [`~utils.ModelOutput`]. Those are
|
||||
data structures containing all the information returned by the model, but that can also be used as tuples or
|
||||
dictionaries.
|
||||
|
||||
Let's see how this looks in an example:
|
||||
|
||||
```python
|
||||
from transformers import BertTokenizer, BertForSequenceClassification
|
||||
import torch
|
||||
|
||||
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
|
||||
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
|
||||
|
||||
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
|
||||
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
|
||||
outputs = model(**inputs, labels=labels)
|
||||
```
|
||||
|
||||
The `outputs` object is a [`~modeling_outputs.SequenceClassifierOutput`]. As we can see in the
documentation of that class below, it means it has an optional `loss`, a `logits`, an optional `hidden_states` and
an optional `attentions` attribute. Here we have the `loss` since we passed along `labels`, but we don't have
`hidden_states` and `attentions` because we didn't pass `output_hidden_states=True` or
`output_attentions=True`.
|
||||
|
||||
You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you
|
||||
will get `None`. Here for instance `outputs.loss` is the loss computed by the model, and `outputs.attentions` is
|
||||
`None`.
|
||||
|
||||
When considering our `outputs` object as a tuple, it only considers the attributes that don't have `None` values.
|
||||
Here for instance, it has two elements, `loss` then `logits`, so
|
||||
|
||||
```python
|
||||
outputs[:2]
|
||||
```
|
||||
|
||||
will return the tuple `(outputs.loss, outputs.logits)` for instance.
|
||||
|
||||
When considering our `outputs` object as a dictionary, it only considers the attributes that don't have `None`
|
||||
values. Here for instance, it has two keys that are `loss` and `logits`.
|
||||
|
||||
We document here the generic model outputs that are used by more than one model type. Specific output types are
|
||||
documented on their corresponding model page.
|
||||
|
||||
## ModelOutput
|
||||
|
||||
[[autodoc]] utils.ModelOutput
|
||||
- to_tuple
|
||||
|
||||
## BaseModelOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.BaseModelOutput
|
||||
|
||||
## BaseModelOutputWithPooling
|
||||
|
||||
[[autodoc]] modeling_outputs.BaseModelOutputWithPooling
|
||||
|
||||
## BaseModelOutputWithCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_outputs.BaseModelOutputWithCrossAttentions
|
||||
|
||||
## BaseModelOutputWithPoolingAndCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions
|
||||
|
||||
## BaseModelOutputWithPast
|
||||
|
||||
[[autodoc]] modeling_outputs.BaseModelOutputWithPast
|
||||
|
||||
## BaseModelOutputWithPastAndCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_outputs.BaseModelOutputWithPastAndCrossAttentions
|
||||
|
||||
## Seq2SeqModelOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.Seq2SeqModelOutput
|
||||
|
||||
## CausalLMOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.CausalLMOutput
|
||||
|
||||
## CausalLMOutputWithCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_outputs.CausalLMOutputWithCrossAttentions
|
||||
|
||||
## CausalLMOutputWithPast
|
||||
|
||||
[[autodoc]] modeling_outputs.CausalLMOutputWithPast
|
||||
|
||||
## MaskedLMOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.MaskedLMOutput
|
||||
|
||||
## Seq2SeqLMOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.Seq2SeqLMOutput
|
||||
|
||||
## NextSentencePredictorOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.NextSentencePredictorOutput
|
||||
|
||||
## SequenceClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.SequenceClassifierOutput
|
||||
|
||||
## Seq2SeqSequenceClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.Seq2SeqSequenceClassifierOutput
|
||||
|
||||
## MultipleChoiceModelOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.MultipleChoiceModelOutput
|
||||
|
||||
## TokenClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.TokenClassifierOutput
|
||||
|
||||
## QuestionAnsweringModelOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.QuestionAnsweringModelOutput
|
||||
|
||||
## Seq2SeqQuestionAnsweringModelOutput
|
||||
|
||||
[[autodoc]] modeling_outputs.Seq2SeqQuestionAnsweringModelOutput
|
||||
|
||||
## TFBaseModelOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFBaseModelOutput
|
||||
|
||||
## TFBaseModelOutputWithPooling
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPooling
|
||||
|
||||
## TFBaseModelOutputWithPoolingAndCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions
|
||||
|
||||
## TFBaseModelOutputWithPast
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPast
|
||||
|
||||
## TFBaseModelOutputWithPastAndCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions
|
||||
|
||||
## TFSeq2SeqModelOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFSeq2SeqModelOutput
|
||||
|
||||
## TFCausalLMOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFCausalLMOutput
|
||||
|
||||
## TFCausalLMOutputWithCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions
|
||||
|
||||
## TFCausalLMOutputWithPast
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithPast
|
||||
|
||||
## TFMaskedLMOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFMaskedLMOutput
|
||||
|
||||
## TFSeq2SeqLMOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFSeq2SeqLMOutput
|
||||
|
||||
## TFNextSentencePredictorOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFNextSentencePredictorOutput
|
||||
|
||||
## TFSequenceClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFSequenceClassifierOutput
|
||||
|
||||
## TFSeq2SeqSequenceClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput
|
||||
|
||||
## TFMultipleChoiceModelOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFMultipleChoiceModelOutput
|
||||
|
||||
## TFTokenClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFTokenClassifierOutput
|
||||
|
||||
## TFQuestionAnsweringModelOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFQuestionAnsweringModelOutput
|
||||
|
||||
## TFSeq2SeqQuestionAnsweringModelOutput
|
||||
|
||||
[[autodoc]] modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput
|
||||
|
||||
## FlaxBaseModelOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutput
|
||||
|
||||
## FlaxBaseModelOutputWithPast
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPast
|
||||
|
||||
## FlaxBaseModelOutputWithPooling
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPooling
|
||||
|
||||
## FlaxBaseModelOutputWithPastAndCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions
|
||||
|
||||
## FlaxSeq2SeqModelOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqModelOutput
|
||||
|
||||
## FlaxCausalLMOutputWithCrossAttentions
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions
|
||||
|
||||
## FlaxMaskedLMOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxMaskedLMOutput
|
||||
|
||||
## FlaxSeq2SeqLMOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqLMOutput
|
||||
|
||||
## FlaxNextSentencePredictorOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxNextSentencePredictorOutput
|
||||
|
||||
## FlaxSequenceClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxSequenceClassifierOutput
|
||||
|
||||
## FlaxSeq2SeqSequenceClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput
|
||||
|
||||
## FlaxMultipleChoiceModelOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxMultipleChoiceModelOutput
|
||||
|
||||
## FlaxTokenClassifierOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxTokenClassifierOutput
|
||||
|
||||
## FlaxQuestionAnsweringModelOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxQuestionAnsweringModelOutput
|
||||
|
||||
## FlaxSeq2SeqQuestionAnsweringModelOutput
|
||||
|
||||
[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput
|
||||
docs/source/en/main_classes/pipelines.mdx (new file, 440 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Pipelines
|
||||
|
||||
The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of
|
||||
the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity
|
||||
Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. See the
|
||||
[task summary](../task_summary) for examples of use.
|
||||
|
||||
There are two categories of pipeline abstractions to be aware of:
|
||||
|
||||
- The [`pipeline`] which is the most powerful object encapsulating all other pipelines.
|
||||
- The other task-specific pipelines:
|
||||
|
||||
- [`AudioClassificationPipeline`]
|
||||
- [`AutomaticSpeechRecognitionPipeline`]
|
||||
- [`ConversationalPipeline`]
|
||||
- [`FeatureExtractionPipeline`]
|
||||
- [`FillMaskPipeline`]
|
||||
- [`ImageClassificationPipeline`]
|
||||
- [`ImageSegmentationPipeline`]
|
||||
- [`ObjectDetectionPipeline`]
|
||||
- [`QuestionAnsweringPipeline`]
|
||||
- [`SummarizationPipeline`]
|
||||
- [`TableQuestionAnsweringPipeline`]
|
||||
- [`TextClassificationPipeline`]
|
||||
- [`TextGenerationPipeline`]
|
||||
- [`Text2TextGenerationPipeline`]
|
||||
- [`TokenClassificationPipeline`]
|
||||
- [`TranslationPipeline`]
|
||||
- [`ZeroShotClassificationPipeline`]
|
||||
- [`ZeroShotImageClassificationPipeline`]
|
||||
|
||||
## The pipeline abstraction
|
||||
|
||||
The *pipeline* abstraction is a wrapper around all the other available pipelines. It is instantiated as any other
|
||||
pipeline but can provide additional quality-of-life features.
|
||||
|
||||
Simple call on one item:
|
||||
|
||||
```python
|
||||
>>> pipe = pipeline("text-classification")
|
||||
>>> pipe("This restaurant is awesome")
|
||||
[{'label': 'POSITIVE', 'score': 0.9998743534088135}]
|
||||
```
|
||||
|
||||
If you want to use a specific model from the [hub](https://huggingface.co) you can ignore the task if the model on
|
||||
the hub already defines it:
|
||||
|
||||
```python
|
||||
>>> pipe = pipeline(model="roberta-large-mnli")
|
||||
>>> pipe("This restaurant is awesome")
|
||||
[{'label': 'POSITIVE', 'score': 0.9998743534088135}]
|
||||
```
|
||||
|
||||
To call a pipeline on many items, you can call it with a *list*.
|
||||
|
||||
```python
|
||||
>>> pipe = pipeline("text-classification")
|
||||
>>> pipe(["This restaurant is awesome", "This restaurant is aweful"])
|
||||
[{'label': 'POSITIVE', 'score': 0.9998743534088135},
|
||||
{'label': 'NEGATIVE', 'score': 0.9996669292449951}]
|
||||
```
|
||||
|
||||
To iterate over full datasets it is recommended to use a `dataset` directly. This means you don't need to allocate
the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on
GPU. If it doesn't, don't hesitate to create an issue.
|
||||
|
||||
```python
|
||||
import datasets
|
||||
from transformers import pipeline
|
||||
from transformers.pipelines.pt_utils import KeyDataset
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0)
|
||||
dataset = datasets.load_dataset("superb", name="asr", split="test")
|
||||
|
||||
# KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item
|
||||
# as we're not interested in the *target* part of the dataset.
|
||||
for out in tqdm(pipe(KeyDataset(dataset, "file"))):
|
||||
print(out)
|
||||
# {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"}
|
||||
# {"text": ....}
|
||||
# ....
|
||||
```
|
||||
|
||||
For ease of use, a generator is also possible:
|
||||
|
||||
|
||||
```python
|
||||
from transformers import pipeline
|
||||
|
||||
pipe = pipeline("text-classification")
|
||||
|
||||
|
||||
def data():
|
||||
while True:
|
||||
# This could come from a dataset, a database, a queue or HTTP request
|
||||
# in a server
|
||||
# Caveat: because this is iterative, you cannot use `num_workers > 1` variable
|
||||
# to use multiple threads to preprocess data. You can still have 1 thread that
|
||||
# does the preprocessing while the main runs the big inference
|
||||
yield "This is a test"
|
||||
|
||||
|
||||
for out in pipe(data()):
|
||||
print(out)
|
||||
# {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"}
|
||||
# {"text": ....}
|
||||
# ....
|
||||
```
|
||||
|
||||
[[autodoc]] pipeline
|
||||
|
||||
## Pipeline batching
|
||||
|
||||
All pipelines can use batching. This will work
|
||||
whenever the pipeline uses its streaming ability (so when passing lists or `Dataset` or `generator`).
|
||||
|
||||
```python
|
||||
from transformers import pipeline
|
||||
from transformers.pipelines.pt_utils import KeyDataset
|
||||
import datasets
|
||||
|
||||
dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised")
|
||||
pipe = pipeline("text-classification", device=0)
|
||||
for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"):
|
||||
print(out)
|
||||
# [{'label': 'POSITIVE', 'score': 0.9998743534088135}]
|
||||
# Exactly the same output as before, but the content are passed
|
||||
# as batches to the model
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
However, this is not automatically a win for performance. It can be either a 10x speedup or 5x slowdown depending
|
||||
on hardware, data and the actual model being used.
|
||||
|
||||
Example where it's mostly a speedup:
|
||||
|
||||
</Tip>
|
||||
|
||||
```python
|
||||
from transformers import pipeline
|
||||
from torch.utils.data import Dataset
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
pipe = pipeline("text-classification", device=0)
|
||||
|
||||
|
||||
class MyDataset(Dataset):
|
||||
def __len__(self):
|
||||
return 5000
|
||||
|
||||
def __getitem__(self, i):
|
||||
return "This is a test"
|
||||
|
||||
|
||||
dataset = MyDataset()
|
||||
|
||||
for batch_size in [1, 8, 64, 256]:
|
||||
print("-" * 30)
|
||||
print(f"Streaming batch_size={batch_size}")
|
||||
for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)):
|
||||
pass
|
||||
```
|
||||
|
||||
```
|
||||
# On GTX 970
|
||||
------------------------------
|
||||
Streaming no batching
|
||||
100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s]
|
||||
------------------------------
|
||||
Streaming batch_size=8
|
||||
100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s]
|
||||
------------------------------
|
||||
Streaming batch_size=64
|
||||
100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s]
|
||||
------------------------------
|
||||
Streaming batch_size=256
|
||||
100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s]
|
||||
(diminishing returns, saturated the GPU)
|
||||
```
|
||||
|
||||
Example where it's mostly a slowdown:
|
||||
|
||||
```python
|
||||
class MyDataset(Dataset):
|
||||
def __len__(self):
|
||||
return 5000
|
||||
|
||||
def __getitem__(self, i):
|
||||
if i % 64 == 0:
|
||||
n = 100
|
||||
else:
|
||||
n = 1
|
||||
return "This is a test" * n
|
||||
```
|
||||
|
||||
This is an occasional very long sentence compared to the others. In that case, the **whole** batch will need to be 400
|
||||
tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on
|
||||
bigger batches, the program simply crashes.
|
||||
|
||||
|
||||
```
|
||||
------------------------------
|
||||
Streaming no batching
|
||||
100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s]
|
||||
------------------------------
|
||||
Streaming batch_size=8
|
||||
100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s]
|
||||
------------------------------
|
||||
Streaming batch_size=64
|
||||
100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s]
|
||||
------------------------------
|
||||
Streaming batch_size=256
|
||||
0%| | 0/1000 [00:00<?, ?it/s]
|
||||
Traceback (most recent call last):
|
||||
File "/home/nicolas/src/transformers/test.py", line 42, in <module>
|
||||
for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)):
|
||||
....
|
||||
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
|
||||
RuntimeError: CUDA out of memory. Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch)
|
||||
```
|
||||
|
||||
There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases.

For users, a rule of thumb is:
|
||||
|
||||
- **Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the
|
||||
only way to go.**
|
||||
- If you are latency constrained (live product doing inference), don't batch
|
||||
- If you are using CPU, don't batch.
|
||||
- If you are throughput constrained (you want to run your model on a bunch of static data), on GPU, then:
|
||||
|
||||
- If you have no clue about the size of the sequence_length ("natural" data), by default don't batch, measure and
|
||||
try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don't
|
||||
control the sequence_length.)
|
||||
- If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push
|
||||
it until you get OOMs.
|
||||
- The larger the GPU, the more likely batching is going to be interesting
|
||||
- As soon as you enable batching, make sure you can handle OOMs nicely.
|
||||
|
||||
## Pipeline chunk batching
|
||||
|
||||
`zero-shot-classification` and `question-answering` are slightly specific in the sense that a single input might yield
multiple forward passes of a model. Under normal circumstances, this would yield issues with the `batch_size` argument.

In order to circumvent this issue, both of these pipelines are a bit specific: they are `ChunkPipeline` instead of
regular `Pipeline`. In short:
|
||||
|
||||
|
||||
```python
|
||||
preprocessed = pipe.preprocess(inputs)
|
||||
model_outputs = pipe.forward(preprocessed)
|
||||
outputs = pipe.postprocess(model_outputs)
|
||||
```
|
||||
|
||||
Now becomes:
|
||||
|
||||
|
||||
```python
|
||||
all_model_outputs = []
|
||||
for preprocessed in pipe.preprocess(inputs):
|
||||
model_outputs = pipe.forward(preprocessed)
|
||||
all_model_outputs.append(model_outputs)
|
||||
outputs = pipe.postprocess(all_model_outputs)
|
||||
```
|
||||
|
||||
This should be very transparent to your code because the pipelines are used in
|
||||
the same way.
|
||||
|
||||
This is a simplified view, since the pipeline handles the batching automatically. This means you don't have to care
about how many forward passes your inputs are actually going to trigger; you can optimize the `batch_size`
independently of the inputs. The caveats from the previous section still apply.
|
||||
|
||||
## Pipeline custom code
|
||||
|
||||
If you want to override a specific pipeline, don't hesitate to create an issue for your task at hand: the goal of the
pipelines is to be easy to use and to support most cases, so `transformers` could maybe support your use case.
|
||||
|
||||
|
||||
If you simply want to try it out, you can:
|
||||
|
||||
- Subclass your pipeline of choice
|
||||
|
||||
```python
|
||||
class MyPipeline(TextClassificationPipeline):
    def postprocess(self, model_outputs, **kwargs):
        # Your code goes here, for instance starting from the default post-processing
        scores = super().postprocess(model_outputs, **kwargs)
        # And here
        return scores
|
||||
|
||||
|
||||
my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...)
|
||||
# or if you use *pipeline* function, then:
|
||||
my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline)
|
||||
```
|
||||
|
||||
That should enable you to do all the custom code you want.
|
||||
|
||||
|
||||
## Implementing a pipeline
|
||||
|
||||
[Implementing a new pipeline](../add_new_pipeline)
|
||||
|
||||
## The task specific pipelines
|
||||
|
||||
|
||||
### AudioClassificationPipeline
|
||||
|
||||
[[autodoc]] AudioClassificationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### AutomaticSpeechRecognitionPipeline
|
||||
|
||||
[[autodoc]] AutomaticSpeechRecognitionPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### ConversationalPipeline
|
||||
|
||||
[[autodoc]] Conversation
|
||||
|
||||
[[autodoc]] ConversationalPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### FeatureExtractionPipeline
|
||||
|
||||
[[autodoc]] FeatureExtractionPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### FillMaskPipeline
|
||||
|
||||
[[autodoc]] FillMaskPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### ImageClassificationPipeline
|
||||
|
||||
[[autodoc]] ImageClassificationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### ImageSegmentationPipeline
|
||||
|
||||
[[autodoc]] ImageSegmentationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### NerPipeline
|
||||
|
||||
[[autodoc]] NerPipeline
|
||||
|
||||
See [`TokenClassificationPipeline`] for all details.
|
||||
|
||||
### ObjectDetectionPipeline
|
||||
|
||||
[[autodoc]] ObjectDetectionPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### QuestionAnsweringPipeline
|
||||
|
||||
[[autodoc]] QuestionAnsweringPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### SummarizationPipeline
|
||||
|
||||
[[autodoc]] SummarizationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### TableQuestionAnsweringPipeline
|
||||
|
||||
[[autodoc]] TableQuestionAnsweringPipeline
|
||||
- __call__
|
||||
|
||||
### TextClassificationPipeline
|
||||
|
||||
[[autodoc]] TextClassificationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### TextGenerationPipeline
|
||||
|
||||
[[autodoc]] TextGenerationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### Text2TextGenerationPipeline
|
||||
|
||||
[[autodoc]] Text2TextGenerationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### TokenClassificationPipeline
|
||||
|
||||
[[autodoc]] TokenClassificationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### TranslationPipeline
|
||||
|
||||
[[autodoc]] TranslationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### ZeroShotClassificationPipeline
|
||||
|
||||
[[autodoc]] ZeroShotClassificationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### ZeroShotImageClassificationPipeline
|
||||
|
||||
[[autodoc]] ZeroShotImageClassificationPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
## Parent class: `Pipeline`
|
||||
|
||||
[[autodoc]] Pipeline
|
||||
docs/source/en/main_classes/processors.mdx (new file, 159 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Processors
|
||||
|
||||
Processors can mean two different things in the Transformers library:
|
||||
- the objects that pre-process inputs for multi-modal models such as [Wav2Vec2](../model_doc/wav2vec2) (speech and text)
|
||||
or [CLIP](../model_doc/clip) (text and vision)
|
||||
- deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD.
|
||||
|
||||
## Multi-modal processors
|
||||
|
||||
Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text,
|
||||
vision and audio). This is handled by objects called processors, which group tokenizers (for the text modality) and
|
||||
feature extractors (for vision and audio).
|
||||
|
||||
Those processors inherit from the following base class that implements the saving and loading functionality:
|
||||
|
||||
[[autodoc]] ProcessorMixin
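
As an illustration, here is a minimal sketch of using the CLIP processor to prepare text and image inputs together (the blank image is only a stand-in for a real picture):

```python
from PIL import Image

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.new("RGB", (224, 224))  # stand-in image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(inputs.keys())  # input_ids, attention_mask and pixel_values
```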
|
||||
|
||||
## Deprecated processors
|
||||
|
||||
All processors follow the same architecture which is that of the
|
||||
[`~data.processors.utils.DataProcessor`]. The processor returns a list of
|
||||
[`~data.processors.utils.InputExample`]. These
|
||||
[`~data.processors.utils.InputExample`] can be converted to
|
||||
[`~data.processors.utils.InputFeatures`] in order to be fed to the model.
|
||||
|
||||
[[autodoc]] data.processors.utils.DataProcessor
|
||||
|
||||
[[autodoc]] data.processors.utils.InputExample
|
||||
|
||||
[[autodoc]] data.processors.utils.InputFeatures
|
||||
|
||||
## GLUE
|
||||
|
||||
[General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) is a benchmark that evaluates the
|
||||
performance of models across a diverse set of existing NLU tasks. It was released together with the paper [GLUE: A
|
||||
multi-task benchmark and analysis platform for natural language understanding](https://openreview.net/pdf?id=rJ4km2R5t7)
|
||||
|
||||
This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB,
|
||||
QQP, QNLI, RTE and WNLI.
|
||||
|
||||
Those processors are:
|
||||
|
||||
- [`~data.processors.utils.MrpcProcessor`]
|
||||
- [`~data.processors.utils.MnliProcessor`]
|
||||
- [`~data.processors.utils.MnliMismatchedProcessor`]
|
||||
- [`~data.processors.utils.Sst2Processor`]
|
||||
- [`~data.processors.utils.StsbProcessor`]
|
||||
- [`~data.processors.utils.QqpProcessor`]
|
||||
- [`~data.processors.utils.QnliProcessor`]
|
||||
- [`~data.processors.utils.RteProcessor`]
|
||||
- [`~data.processors.utils.WnliProcessor`]
|
||||
|
||||
Additionally, the following method can be used to load values from a data file and convert them to a list of
|
||||
[`~data.processors.utils.InputExample`].
|
||||
|
||||
[[autodoc]] data.processors.glue.glue_convert_examples_to_features
|
||||
|
||||
|
||||
## XNLI
|
||||
|
||||
[The Cross-Lingual NLI Corpus (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) is a benchmark that evaluates the
|
||||
quality of cross-lingual text representations. XNLI is a crowd-sourced dataset based on [*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/): pairs of text are labeled with textual entailment annotations for 15
different languages (including both high-resource languages such as English and low-resource languages such as Swahili).
|
||||
|
||||
It was released together with the paper [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053)
|
||||
|
||||
This library hosts the processor to load the XNLI data:
|
||||
|
||||
- [`~data.processors.utils.XnliProcessor`]
|
||||
|
||||
Please note that since the gold labels are available on the test set, evaluation is performed on the test set.
|
||||
|
||||
An example using these processors is given in the [run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/text-classification/run_xnli.py) script.
|
||||
|
||||
|
||||
## SQuAD
|
||||
|
||||
[The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) is a benchmark that
|
||||
evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version
|
||||
(v1.1) was released together with the paper [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250). The second version (v2.0) was released alongside the paper [Know What You Don't
|
||||
Know: Unanswerable Questions for SQuAD](https://arxiv.org/abs/1806.03822).
|
||||
|
||||
This library hosts a processor for each of the two versions:
|
||||
|
||||
### Processors
|
||||
|
||||
Those processors are:
|
||||
|
||||
- [`~data.processors.utils.SquadV1Processor`]
|
||||
- [`~data.processors.utils.SquadV2Processor`]
|
||||
|
||||
They both inherit from the abstract class [`~data.processors.utils.SquadProcessor`]
|
||||
|
||||
[[autodoc]] data.processors.squad.SquadProcessor
|
||||
- all
|
||||
|
||||
Additionally, the following method can be used to convert SQuAD examples into
|
||||
[`~data.processors.utils.SquadFeatures`] that can be used as model inputs.
|
||||
|
||||
[[autodoc]] data.processors.squad.squad_convert_examples_to_features
|
||||
|
||||
|
||||
These processors as well as the aforementioned method can be used with files containing the data as well as with the
|
||||
*tensorflow_datasets* package. Examples are given below.
|
||||
|
||||
|
||||
### Example usage
|
||||
|
||||
Here is an example using the processors as well as the conversion method using data files:
|
||||
|
||||
```python
|
||||
# Loading a V2 processor
|
||||
processor = SquadV2Processor()
|
||||
examples = processor.get_dev_examples(squad_v2_data_dir)
|
||||
|
||||
# Loading a V1 processor
|
||||
processor = SquadV1Processor()
|
||||
examples = processor.get_dev_examples(squad_v1_data_dir)
|
||||
|
||||
features = squad_convert_examples_to_features(
|
||||
examples=examples,
|
||||
tokenizer=tokenizer,
|
||||
max_seq_length=max_seq_length,
|
||||
doc_stride=args.doc_stride,
|
||||
max_query_length=max_query_length,
|
||||
is_training=not evaluate,
|
||||
)
|
||||
```
|
||||
|
||||
Using *tensorflow_datasets* is as easy as using a data file:
|
||||
|
||||
```python
|
||||
# tensorflow_datasets only handles SQuAD V1.
|
||||
tfds_examples = tfds.load("squad")
|
||||
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
|
||||
|
||||
features = squad_convert_examples_to_features(
|
||||
examples=examples,
|
||||
tokenizer=tokenizer,
|
||||
max_seq_length=max_seq_length,
|
||||
doc_stride=args.doc_stride,
|
||||
max_query_length=max_query_length,
|
||||
is_training=not evaluate,
|
||||
)
|
||||
```
|
||||
|
||||
Another example using these processors is given in the [run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) script.
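Note that the snippets above assume a tokenizer and a few hyper-parameters are already defined. A minimal, purely illustrative setup for those names could be:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Illustrative values for the names used in the snippets above
max_seq_length = 384
max_query_length = 64
evaluate = True
squad_v2_data_dir = "./squad_v2"  # directory containing dev-v2.0.json


class Args:  # stand-in for parsed command-line arguments
    doc_stride = 128


args = Args()
```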
|
||||
docs/source/en/main_classes/text_generation.mdx (new file, 40 lines)
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Generation
|
||||
|
||||
Each framework has a generate method for auto-regressive text generation implemented in its respective `GenerationMixin` class (a short usage sketch follows the list):
|
||||
|
||||
- PyTorch [`~generation_utils.GenerationMixin.generate`] is implemented in [`~generation_utils.GenerationMixin`].
|
||||
- TensorFlow [`~generation_tf_utils.TFGenerationMixin.generate`] is implemented in [`~generation_tf_utils.TFGenerationMixin`].
|
||||
- Flax/JAX [`~generation_flax_utils.FlaxGenerationMixin.generate`] is implemented in [`~generation_flax_utils.FlaxGenerationMixin`].
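For instance, a minimal PyTorch sketch (the checkpoint name is only an example):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my name is", return_tensors="pt")

# Beam search with 5 beams, stopping early once all beams are finished
output_ids = model.generate(inputs["input_ids"], max_length=30, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```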
|
||||
|
||||
## GenerationMixin
|
||||
|
||||
[[autodoc]] generation_utils.GenerationMixin
|
||||
- generate
|
||||
- greedy_search
|
||||
- sample
|
||||
- beam_search
|
||||
- beam_sample
|
||||
- group_beam_search
|
||||
- constrained_beam_search
|
||||
|
||||
## TFGenerationMixin
|
||||
|
||||
[[autodoc]] generation_tf_utils.TFGenerationMixin
|
||||
- generate
|
||||
|
||||
## FlaxGenerationMixin
|
||||
|
||||
[[autodoc]] generation_flax_utils.FlaxGenerationMixin
|
||||
- generate
|
||||
docs/source/en/main_classes/tokenizer.mdx (new file, 77 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Tokenizer
|
||||
|
||||
A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most
|
||||
of the tokenizers are available in two flavors: a full python implementation and a "Fast" implementation based on the
|
||||
Rust library [🤗 Tokenizers](https://github.com/huggingface/tokenizers). The "Fast" implementations allow:
|
||||
|
||||
1. a significant speed-up in particular when doing batched tokenization and
|
||||
2. additional methods to map between the original string (character and words) and the token space (e.g. getting the
|
||||
index of the token comprising a given character or the span of characters corresponding to a given token). Currently
|
||||
no "Fast" implementation is available for the SentencePiece-based tokenizers (for T5, ALBERT, CamemBERT, XLM-RoBERTa
|
||||
and XLNet models).
|
||||
|
||||
The base classes [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`]
|
||||
implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and
|
||||
"Fast" tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library
|
||||
(downloaded from HuggingFace's AWS S3 repository). They both rely on
|
||||
[`~tokenization_utils_base.PreTrainedTokenizerBase`] that contains the common methods, and
|
||||
[`~tokenization_utils_base.SpecialTokensMixin`].
|
||||
|
||||
[`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] thus implement the main
|
||||
methods for using all the tokenizers:
|
||||
|
||||
- Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and
|
||||
encoding/decoding (i.e., tokenizing and converting to integers).
|
||||
- Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece...).
|
||||
- Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the
|
||||
tokenizer for easy access and making sure they are not split during tokenization.
|
||||
|
||||
[`BatchEncoding`] holds the output of the
|
||||
[`~tokenization_utils_base.PreTrainedTokenizerBase`]'s encoding methods (`__call__`,
|
||||
`encode_plus` and `batch_encode_plus`) and is derived from a Python dictionary. When the tokenizer is a pure python
|
||||
tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by
|
||||
these methods (`input_ids`, `attention_mask`...). When the tokenizer is a "Fast" tokenizer (i.e., backed by
|
||||
HuggingFace [tokenizers library](https://github.com/huggingface/tokenizers)), this class provides in addition
|
||||
several advanced alignment methods which can be used to map between the original string (character and words) and the
|
||||
token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding
|
||||
to a given token).
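For example, with a "Fast" tokenizer the returned [`BatchEncoding`] can be used both as a plain dictionary and for alignment queries (a small sketch):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # loads the "Fast" tokenizer when available
encoding = tokenizer("Hello world!")

print(encoding["input_ids"])      # plain dictionary access to the model inputs
print(encoding.tokens())          # sub-word tokens, including special tokens
print(encoding.word_ids())        # which word each token comes from (None for special tokens)
print(encoding.char_to_token(6))  # index of the token covering the character "w"
```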
|
||||
|
||||
|
||||
## PreTrainedTokenizer
|
||||
|
||||
[[autodoc]] PreTrainedTokenizer
|
||||
- __call__
|
||||
- batch_decode
|
||||
- decode
|
||||
- encode
|
||||
- push_to_hub
|
||||
- all
|
||||
|
||||
## PreTrainedTokenizerFast
|
||||
|
||||
The [`PreTrainedTokenizerFast`] depends on the [tokenizers](https://huggingface.co/docs/tokenizers) library. The tokenizers obtained from the 🤗 tokenizers library can be
|
||||
loaded very simply into 🤗 transformers. Take a look at the [Using tokenizers from 🤗 tokenizers](../fast_tokenizers) page to understand how this is done.
|
||||
|
||||
[[autodoc]] PreTrainedTokenizerFast
|
||||
- __call__
|
||||
- batch_decode
|
||||
- decode
|
||||
- encode
|
||||
- push_to_hub
|
||||
- all
|
||||
|
||||
## BatchEncoding
|
||||
|
||||
[[autodoc]] BatchEncoding
|
||||
@ -40,27 +40,29 @@ The [`Trainer`] contains the basic training loop which supports the above featur
|
||||
The [`Trainer`] class is optimized for 🤗 Transformers models and can have surprising behaviors
|
||||
when you use it on other models. When using it on your own model, make sure:
|
||||
|
||||
- your model always return tuples or subclasses of [`~file_utils.ModelOutput`].
|
||||
- your model always returns tuples or subclasses of [`~utils.ModelOutput`].
|
||||
- your model can compute the loss if a `labels` argument is provided and that loss is returned as the first
|
||||
element of the tuple (if your model returns tuples)
|
||||
- your model can accept multiple label arguments (use the `label_names` in your [`TrainingArguments`] to indicate their name to the [`Trainer`]) but none of them should be named `"label"`.
|
||||
|
||||
</Tip>
|
||||
|
||||
Here is an example of how to customize [`Trainer`] using a custom loss function for multi-label classification:
|
||||
Here is an example of how to customize [`Trainer`] to use a weighted loss (useful when you have an unbalanced training set):
|
||||
|
||||
```python
|
||||
import torch
from torch import nn
|
||||
from transformers import Trainer
|
||||
|
||||
class MultilabelTrainer(Trainer):
|
||||
|
||||
class CustomTrainer(Trainer):
|
||||
def compute_loss(self, model, inputs, return_outputs=False):
|
||||
labels = inputs.get("labels")
|
||||
# forward pass
|
||||
outputs = model(**inputs)
|
||||
logits = outputs.get('logits')
|
||||
loss_fct = nn.BCEWithLogitsLoss()
|
||||
loss = loss_fct(logits.view(-1, self.model.config.num_labels),
|
||||
labels.float().view(-1, self.model.config.num_labels))
|
||||
logits = outputs.get("logits")
|
||||
# compute custom loss (suppose one has 3 labels with different weights)
|
||||
loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0]))
|
||||
loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
|
||||
return (loss, outputs) if return_outputs else loss
|
||||
```
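The custom class is then used exactly like the stock [`Trainer`]. A hypothetical sketch, assuming `model`, `training_args` and `train_dataset` are defined as usual:

```python
trainer = CustomTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
)
trainer.train()
```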
|
||||
|
||||
@ -187,6 +189,103 @@ that make things deterministic (e.g., `torch.backends.cudnn.deterministic`) may
|
||||
can't be done by default, but you can enable those yourself if needed.
|
||||
|
||||
|
||||
## Specific GPUs Selection
|
||||
|
||||
Let's discuss how you can tell your program which GPUs are to be used and in what order.
|
||||
|
||||
When using [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) with only a subset of your GPUs, you simply specify the number of GPUs to use. For example, if you have 4 GPUs but you wish to use the first 2, you can do:
|
||||
|
||||
```bash
|
||||
python -m torch.distributed.launch --nproc_per_node=2 trainer-program.py ...
|
||||
```
|
||||
|
||||
If you have either [`accelerate`](https://github.com/huggingface/accelerate) or [`deepspeed`](https://github.com/microsoft/DeepSpeed) installed, you can also accomplish the same by using one of:
|
||||
```bash
|
||||
accelerate launch --num_processes 2 trainer-program.py ...
|
||||
```
|
||||
|
||||
```bash
|
||||
deepspeed --num_gpus 2 trainer-program.py ...
|
||||
```
|
||||
|
||||
You don't need to use the Accelerate or [the Deepspeed integration](Deepspeed) features to use these launchers.
|
||||
|
||||
|
||||
Until now you were able to tell the program how many GPUs to use. Now let's discuss how to select specific GPUs and control their order.
|
||||
|
||||
The following environment variables help you control which GPUs to use and their order.
|
||||
|
||||
**`CUDA_VISIBLE_DEVICES`**
|
||||
|
||||
If you have multiple GPUs and you'd like to use only 1 or a few of those GPUs, set the environment variable `CUDA_VISIBLE_DEVICES` to a list of the GPUs to be used.
|
||||
|
||||
For example, let's say you have 4 GPUs: 0, 1, 2 and 3. To run only on the physical GPUs 0 and 2, you can do:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=0,2 python -m torch.distributed.launch trainer-program.py ...
|
||||
```
|
||||
|
||||
So now PyTorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to `cuda:0` and `cuda:1` respectively.
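A quick sketch to verify what PyTorch actually sees (assuming the script is launched with `CUDA_VISIBLE_DEVICES=0,2` as above):

```python
import torch

print(torch.cuda.device_count())      # 2
print(torch.cuda.get_device_name(0))  # physical GPU 0
print(torch.cuda.get_device_name(1))  # physical GPU 2
```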
|
||||
|
||||
You can even change their order:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=2,0 python -m torch.distributed.launch trainer-program.py ...
|
||||
```
|
||||
|
||||
Here your physical GPUs 0 and 2 are mapped to `cuda:1` and `cuda:0` respectively.
|
||||
|
||||
The above examples were all for the `DistributedDataParallel` use pattern, but the same method works for [`DataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html) as well:
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ...
|
||||
```
|
||||
|
||||
To emulate an environment without GPUs simply set this environment variable to an empty value like so:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES= python trainer-program.py ...
|
||||
```
|
||||
|
||||
As with any environment variable you can, of course, export those instead of adding these to the command line, as in:
|
||||
|
||||
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES=0,2
|
||||
python -m torch.distributed.launch trainer-program.py ...
|
||||
```
|
||||
|
||||
but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it's a common practice to set the environment variable just for a specific run on the same command line as it's shown in most examples of this section.
|
||||
|
||||
**`CUDA_DEVICE_ORDER`**
|
||||
|
||||
There is an additional environment variable `CUDA_DEVICE_ORDER` that controls how the physical devices are ordered. The two choices are:
|
||||
|
||||
1. ordered by PCIe bus IDs (matches `nvidia-smi`'s order) - this is the default.
|
||||
|
||||
```bash
|
||||
export CUDA_DEVICE_ORDER=PCI_BUS_ID
|
||||
```
|
||||
|
||||
2. ordered by GPU compute capabilities
|
||||
|
||||
```bash
|
||||
export CUDA_DEVICE_ORDER=FASTEST_FIRST
|
||||
```
|
||||
|
||||
Most of the time you don't need to care about this environment variable, but it's very helpful if you have a lopsided setup where an old and a new GPU are physically inserted in such a way that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can't swap the cards (e.g., if the cooling of the devices gets impacted) then setting `CUDA_DEVICE_ORDER=FASTEST_FIRST` will always put the newer, faster card first. It'll be somewhat confusing though since `nvidia-smi` will still report them in the PCIe order.
|
||||
|
||||
The other solution to swapping the order is to use:
|
||||
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES=1,0
|
||||
```
|
||||
In this example we are working with just 2 GPUs, but of course the same would apply to as many GPUs as your computer has.
|
||||
|
||||
Also, if you do set this environment variable it's best to set it in your `~/.bashrc` file or some other startup config file and forget about it.
|
||||
|
||||
|
||||
|
||||
|
||||
## Trainer Integrations
|
||||
|
||||
The [`Trainer`] has been extended to support libraries that may dramatically improve your training
|
||||
@ -442,109 +541,29 @@ Known caveats:
|
||||
doing this yourself: `--sharded_ddp "zero_dp_3 auto_wrap"`.
|
||||
|
||||
|
||||
### DeepSpeed
|
||||
Sections that were moved:
|
||||
|
||||
|
||||
Moved to [Trainer DeepSpeed integration](deepspeed#trainer-deepspeed-integration).
|
||||
|
||||
|
||||
#### Installation
|
||||
|
||||
Moved to [Installation](deepspeed#deepspeed-installation).
|
||||
|
||||
|
||||
#### Deployment with multiple GPUs
|
||||
|
||||
Moved to [Deployment with multiple GPUs](deepspeed#deepspeed-multi-gpu).
|
||||
|
||||
|
||||
#### Deployment with one GPU
|
||||
|
||||
Moved to [Deployment with one GPU](deepspeed#deepspeed-one-gpu).
|
||||
|
||||
|
||||
#### Deployment in Notebooks
|
||||
|
||||
Moved to [Deployment in Notebooks](deepspeed#deepspeed-notebook).
|
||||
|
||||
|
||||
#### Configuration
|
||||
|
||||
Moved to [Configuration](deepspeed#deepspeed-config).
|
||||
|
||||
|
||||
#### Passing Configuration
|
||||
|
||||
Moved to [Passing Configuration](deepspeed#deepspeed-config-passing).
|
||||
|
||||
|
||||
#### Shared Configuration
|
||||
|
||||
Moved to [Shared Configuration](deepspeed#deepspeed-config-shared).
|
||||
|
||||
#### ZeRO
|
||||
|
||||
Moved to [ZeRO](deepspeed#deepspeed-zero).
|
||||
|
||||
##### ZeRO-2 Config
|
||||
|
||||
Moved to [ZeRO-2 Config](deepspeed#deepspeed-zero2-config).
|
||||
|
||||
##### ZeRO-3 Config
|
||||
|
||||
Moved to [ZeRO-3 Config](deepspeed#deepspeed-zero3-config).
|
||||
|
||||
|
||||
#### NVMe Support
|
||||
|
||||
Moved to [NVMe Support](deepspeed#deepspeed-nvme).
|
||||
|
||||
##### ZeRO-2 vs ZeRO-3 Performance
|
||||
|
||||
Moved to [ZeRO-2 vs ZeRO-3 Performance](deepspeed#deepspeed-zero2-zero3-performance).
|
||||
|
||||
##### ZeRO-2 Example
|
||||
|
||||
Moved to [ZeRO-2 Example](deepspeed#deepspeed-zero2-example).
|
||||
|
||||
##### ZeRO-3 Example
|
||||
|
||||
Moved to [ZeRO-3 Example](deepspeed#deepspeed-zero3-example).
|
||||
|
||||
|
||||
#### Optimizer and Scheduler
|
||||
|
||||
##### Optimizer
|
||||
|
||||
Moved to [Optimizer](deepspeed#deepspeed-optimizer).
|
||||
|
||||
|
||||
##### Scheduler
|
||||
|
||||
Moved to [Scheduler](deepspeed#deepspeed-scheduler).
|
||||
|
||||
#### fp32 Precision
|
||||
|
||||
Moved to [fp32 Precision](deepspeed#deepspeed-fp32).
|
||||
|
||||
#### Automatic Mixed Precision
|
||||
|
||||
Moved to [Automatic Mixed Precision](deepspeed#deepspeed-amp).
|
||||
|
||||
#### Batch Size
|
||||
|
||||
Moved to [Batch Size](deepspeed#deepspeed-bs).
|
||||
|
||||
#### Gradient Accumulation
|
||||
|
||||
Moved to [Gradient Accumulation](deepspeed#deepspeed-grad-acc).
|
||||
|
||||
|
||||
#### Gradient Clipping
|
||||
|
||||
Moved to [Gradient Clipping](deepspeed#deepspeed-grad-clip).
|
||||
|
||||
|
||||
#### Getting The Model Weights Out
|
||||
|
||||
Moved to [Getting The Model Weights Out](deepspeed#deepspeed-weight-extraction).
|
||||
[ <a href="./deepspeed#deepspeed-trainer-integration">DeepSpeed</a><a id="deepspeed"></a>
|
||||
| <a href="./deepspeed#deepspeed-installation">Installation</a><a id="installation"></a>
|
||||
| <a href="./deepspeed#deepspeed-multi-gpu">Deployment with multiple GPUs</a><a id="deployment-with-multiple-gpus"></a>
|
||||
| <a href="./deepspeed#deepspeed-one-gpu">Deployment with one GPU</a><a id="deployment-with-one-gpu"></a>
|
||||
| <a href="./deepspeed#deepspeed-notebook">Deployment in Notebooks</a><a id="deployment-in-notebooks"></a>
|
||||
| <a href="./deepspeed#deepspeed-config">Configuration</a><a id="configuration"></a>
|
||||
| <a href="./deepspeed#deepspeed-config-passing">Passing Configuration</a><a id="passing-configuration"></a>
|
||||
| <a href="./deepspeed#deepspeed-config-shared">Shared Configuration</a><a id="shared-configuration"></a>
|
||||
| <a href="./deepspeed#deepspeed-zero">ZeRO</a><a id="zero"></a>
|
||||
| <a href="./deepspeed#deepspeed-zero2-config">ZeRO-2 Config</a><a id="zero-2-config"></a>
|
||||
| <a href="./deepspeed#deepspeed-zero3-config">ZeRO-3 Config</a><a id="zero-3-config"></a>
|
||||
| <a href="./deepspeed#deepspeed-nvme">NVMe Support</a><a id="nvme-support"></a>
|
||||
| <a href="./deepspeed#deepspeed-zero2-zero3-performance">ZeRO-2 vs ZeRO-3 Performance</a><a id="zero-2-vs-zero-3-performance"></a>
|
||||
| <a href="./deepspeed#deepspeed-zero2-example">ZeRO-2 Example</a><a id="zero-2-example"></a>
|
||||
| <a href="./deepspeed#deepspeed-zero3-example">ZeRO-3 Example</a><a id="zero-3-example"></a>
|
||||
| <a href="./deepspeed#deepspeed-optimizer">Optimizer</a><a id="optimizer"></a>
|
||||
| <a href="./deepspeed#deepspeed-scheduler">Scheduler</a><a id="scheduler"></a>
|
||||
| <a href="./deepspeed#deepspeed-fp32">fp32 Precision</a><a id="fp32-precision"></a>
|
||||
| <a href="./deepspeed#deepspeed-amp">Automatic Mixed Precision</a><a id="automatic-mixed-precision"></a>
|
||||
| <a href="./deepspeed#deepspeed-bs">Batch Size</a><a id="batch-size"></a>
|
||||
| <a href="./deepspeed#deepspeed-grad-acc">Gradient Accumulation</a><a id="gradient-accumulation"></a>
|
||||
| <a href="./deepspeed#deepspeed-grad-clip">Gradient Clipping</a><a id="gradient-clipping"></a>
|
||||
| <a href="./deepspeed#deepspeed-weight-extraction">Getting The Model Weights Out</a><a id="getting-the-model-weights-out"></a>
|
||||
]
|
||||
@ -209,7 +209,7 @@ Here is a `pytorch-pretrained-bert` to 🤗 Transformers conversion example for
|
||||
|
||||
```python
|
||||
# Let's load our model
|
||||
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
|
||||
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
|
||||
|
||||
# If you used to have this line in pytorch-pretrained-bert:
|
||||
loss = model(input_ids, labels=labels)
|
||||
@ -222,7 +222,7 @@ loss = outputs[0]
|
||||
loss, logits = outputs[:2]
|
||||
|
||||
# And even the attention weights if you configure the model to output them (and other outputs too, see the docstrings and documentation)
|
||||
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
|
||||
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", output_attentions=True)
|
||||
outputs = model(input_ids, labels=labels)
|
||||
loss, logits, attentions = outputs
|
||||
```
|
||||
@ -241,23 +241,23 @@ Here is an example:
|
||||
|
||||
```python
|
||||
### Let's load a model and tokenizer
|
||||
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
||||
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
|
||||
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
|
||||
|
||||
### Do some stuff to our model and tokenizer
|
||||
# Ex: add new tokens to the vocabulary and embeddings of our model
|
||||
tokenizer.add_tokens(['[SPECIAL_TOKEN_1]', '[SPECIAL_TOKEN_2]'])
|
||||
tokenizer.add_tokens(["[SPECIAL_TOKEN_1]", "[SPECIAL_TOKEN_2]"])
|
||||
model.resize_token_embeddings(len(tokenizer))
|
||||
# Train our model
|
||||
train(model)
|
||||
|
||||
### Now let's save our model and tokenizer to a directory
|
||||
model.save_pretrained('./my_saved_model_directory/')
|
||||
tokenizer.save_pretrained('./my_saved_model_directory/')
|
||||
model.save_pretrained("./my_saved_model_directory/")
|
||||
tokenizer.save_pretrained("./my_saved_model_directory/")
|
||||
|
||||
### Reload the model and the tokenizer
|
||||
model = BertForSequenceClassification.from_pretrained('./my_saved_model_directory/')
|
||||
tokenizer = BertTokenizer.from_pretrained('./my_saved_model_directory/')
|
||||
model = BertForSequenceClassification.from_pretrained("./my_saved_model_directory/")
|
||||
tokenizer = BertTokenizer.from_pretrained("./my_saved_model_directory/")
|
||||
```
|
||||
|
||||
### Optimizers: BertAdam & OpenAIAdam are now AdamW, schedules are standard PyTorch schedules
|
||||
@ -283,7 +283,13 @@ num_warmup_steps = 100
|
||||
warmup_proportion = float(num_warmup_steps) / float(num_training_steps) # 0.1
|
||||
|
||||
### Previously BertAdam optimizer was instantiated like this:
|
||||
optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, num_training_steps=num_training_steps)
|
||||
optimizer = BertAdam(
|
||||
model.parameters(),
|
||||
lr=lr,
|
||||
schedule="warmup_linear",
|
||||
warmup=warmup_proportion,
|
||||
num_training_steps=num_training_steps,
|
||||
)
|
||||
### and used like this:
|
||||
for batch in train_data:
|
||||
loss = model(batch)
|
||||
@ -291,13 +297,19 @@ for batch in train_data:
|
||||
optimizer.step()
|
||||
|
||||
### In 🤗 Transformers, optimizer and schedules are split and instantiated like this:
|
||||
optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False
|
||||
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) # PyTorch scheduler
|
||||
optimizer = AdamW(
|
||||
model.parameters(), lr=lr, correct_bias=False
|
||||
) # To reproduce BertAdam specific behavior set correct_bias=False
|
||||
scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps
|
||||
) # PyTorch scheduler
|
||||
### and used like this:
|
||||
for batch in train_data:
|
||||
loss = model(batch)
|
||||
loss.backward()
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) # Gradient clipping is not in AdamW anymore (so you can use amp without issue)
|
||||
torch.nn.utils.clip_grad_norm_(
|
||||
model.parameters(), max_grad_norm
|
||||
) # Gradient clipping is not in AdamW anymore (so you can use amp without issue)
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
```
|
||||
docs/source/en/model_doc/albert.mdx (new file, 170 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# ALBERT
|
||||
|
||||
## Overview
|
||||
|
||||
The ALBERT model was proposed in [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma,
|
||||
Radu Soricut. It presents two parameter-reduction techniques to lower memory consumption and increase the training
|
||||
speed of BERT:
|
||||
|
||||
- Splitting the embedding matrix into two smaller matrices.
|
||||
- Using repeating layers split among groups.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Increasing model size when pretraining natural language representations often results in improved performance on
|
||||
downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations,
|
||||
longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction
|
||||
techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows
|
||||
that our proposed methods lead to models that scale much better compared to the original BERT. We also use a
|
||||
self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks
|
||||
with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and
|
||||
SQuAD benchmarks while having fewer parameters compared to BERT-large.*
|
||||
|
||||
Tips:
|
||||
|
||||
- ALBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather
|
||||
than the left.
|
||||
- ALBERT uses repeating layers which results in a small memory footprint, however the computational cost remains
|
||||
similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same
|
||||
number of (repeating) layers.
|
||||
|
||||
This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by
|
||||
[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT).
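A minimal usage sketch (the checkpoint name is only an example):

```python
from transformers import AlbertTokenizer, AlbertModel

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = AlbertModel.from_pretrained("albert-base-v2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs)
last_hidden_states = outputs.last_hidden_state  # shape (batch_size, sequence_length, hidden_size)
```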
|
||||
|
||||
## AlbertConfig
|
||||
|
||||
[[autodoc]] AlbertConfig
|
||||
|
||||
## AlbertTokenizer
|
||||
|
||||
[[autodoc]] AlbertTokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
- get_special_tokens_mask
|
||||
- create_token_type_ids_from_sequences
|
||||
- save_vocabulary
|
||||
|
||||
## AlbertTokenizerFast
|
||||
|
||||
[[autodoc]] AlbertTokenizerFast
|
||||
|
||||
## Albert specific outputs
|
||||
|
||||
[[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput
|
||||
|
||||
[[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput
|
||||
|
||||
## AlbertModel
|
||||
|
||||
[[autodoc]] AlbertModel
|
||||
- forward
|
||||
|
||||
## AlbertForPreTraining
|
||||
|
||||
[[autodoc]] AlbertForPreTraining
|
||||
- forward
|
||||
|
||||
## AlbertForMaskedLM
|
||||
|
||||
[[autodoc]] AlbertForMaskedLM
|
||||
- forward
|
||||
|
||||
## AlbertForSequenceClassification
|
||||
|
||||
[[autodoc]] AlbertForSequenceClassification
|
||||
- forward
|
||||
|
||||
## AlbertForMultipleChoice
|
||||
|
||||
[[autodoc]] AlbertForMultipleChoice
|
||||
|
||||
## AlbertForTokenClassification
|
||||
|
||||
[[autodoc]] AlbertForTokenClassification
|
||||
- forward
|
||||
|
||||
## AlbertForQuestionAnswering
|
||||
|
||||
[[autodoc]] AlbertForQuestionAnswering
|
||||
- forward
|
||||
|
||||
## TFAlbertModel
|
||||
|
||||
[[autodoc]] TFAlbertModel
|
||||
- call
|
||||
|
||||
## TFAlbertForPreTraining
|
||||
|
||||
[[autodoc]] TFAlbertForPreTraining
|
||||
- call
|
||||
|
||||
## TFAlbertForMaskedLM
|
||||
|
||||
[[autodoc]] TFAlbertForMaskedLM
|
||||
- call
|
||||
|
||||
## TFAlbertForSequenceClassification
|
||||
|
||||
[[autodoc]] TFAlbertForSequenceClassification
|
||||
- call
|
||||
|
||||
## TFAlbertForMultipleChoice
|
||||
|
||||
[[autodoc]] TFAlbertForMultipleChoice
|
||||
- call
|
||||
|
||||
## TFAlbertForTokenClassification
|
||||
|
||||
[[autodoc]] TFAlbertForTokenClassification
|
||||
- call
|
||||
|
||||
## TFAlbertForQuestionAnswering
|
||||
|
||||
[[autodoc]] TFAlbertForQuestionAnswering
|
||||
- call
|
||||
|
||||
## FlaxAlbertModel
|
||||
|
||||
[[autodoc]] FlaxAlbertModel
|
||||
- __call__
|
||||
|
||||
## FlaxAlbertForPreTraining
|
||||
|
||||
[[autodoc]] FlaxAlbertForPreTraining
|
||||
- __call__
|
||||
|
||||
## FlaxAlbertForMaskedLM
|
||||
|
||||
[[autodoc]] FlaxAlbertForMaskedLM
|
||||
- __call__
|
||||
|
||||
## FlaxAlbertForSequenceClassification
|
||||
|
||||
[[autodoc]] FlaxAlbertForSequenceClassification
|
||||
- __call__
|
||||
|
||||
## FlaxAlbertForMultipleChoice
|
||||
|
||||
[[autodoc]] FlaxAlbertForMultipleChoice
|
||||
- __call__
|
||||
|
||||
## FlaxAlbertForTokenClassification
|
||||
|
||||
[[autodoc]] FlaxAlbertForTokenClassification
|
||||
- __call__
|
||||
|
||||
## FlaxAlbertForQuestionAnswering
|
||||
|
||||
[[autodoc]] FlaxAlbertForQuestionAnswering
|
||||
- __call__
|
||||
docs/source/en/model_doc/auto.mdx (new file, 263 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Auto Classes
|
||||
|
||||
In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you
|
||||
are supplying to the `from_pretrained()` method. AutoClasses are here to do this job for you so that you
|
||||
automatically retrieve the relevant model given the name/path to the pretrained weights/config/vocabulary.
|
||||
|
||||
Instantiating one of [`AutoConfig`], [`AutoModel`], and
|
||||
[`AutoTokenizer`] will directly create a class of the relevant architecture. For instance
|
||||
|
||||
|
||||
```python
|
||||
model = AutoModel.from_pretrained("bert-base-cased")
|
||||
```
|
||||
|
||||
will create a model that is an instance of [`BertModel`].
|
||||
|
||||
There is one class of `AutoModel` for each task, and for each backend (PyTorch, TensorFlow, or Flax).
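For example, the same checkpoint name can be loaded into the matching architecture of each backend (a minimal sketch; the sequence classification head is randomly initialized here):

```python
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TFAutoModelForSequenceClassification,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
pt_model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")  # PyTorch
tf_model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased")  # TensorFlow
```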
|
||||
|
||||
## Extending the Auto Classes
|
||||
|
||||
Each of the auto classes has a method to be extended with your custom classes. For instance, if you have defined a
|
||||
custom class of model `NewModel`, make sure you have a `NewModelConfig` then you can add those to the auto
|
||||
classes like this:
|
||||
|
||||
```python
|
||||
from transformers import AutoConfig, AutoModel
|
||||
|
||||
AutoConfig.register("new-model", NewModelConfig)
|
||||
AutoModel.register(NewModelConfig, NewModel)
|
||||
```
|
||||
|
||||
You will then be able to use the auto classes like you would usually do!
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
If your `NewModelConfig` is a subclass of [`~transformers.PretrainedConfig`], make sure its
|
||||
`model_type` attribute is set to the same key you use when registering the config (here `"new-model"`).
|
||||
|
||||
Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
|
||||
`config_class` attribute is set to the same class you use when registering the model (here
|
||||
`NewModelConfig`).
|
||||
|
||||
</Tip>
|
||||
|
||||
## AutoConfig
|
||||
|
||||
[[autodoc]] AutoConfig
|
||||
|
||||
## AutoTokenizer
|
||||
|
||||
[[autodoc]] AutoTokenizer
|
||||
|
||||
## AutoFeatureExtractor
|
||||
|
||||
[[autodoc]] AutoFeatureExtractor
|
||||
|
||||
## AutoProcessor
|
||||
|
||||
[[autodoc]] AutoProcessor
|
||||
|
||||
## AutoModel
|
||||
|
||||
[[autodoc]] AutoModel
|
||||
|
||||
## AutoModelForPreTraining
|
||||
|
||||
[[autodoc]] AutoModelForPreTraining
|
||||
|
||||
## AutoModelForCausalLM
|
||||
|
||||
[[autodoc]] AutoModelForCausalLM
|
||||
|
||||
## AutoModelForMaskedLM
|
||||
|
||||
[[autodoc]] AutoModelForMaskedLM
|
||||
|
||||
## AutoModelForSeq2SeqLM
|
||||
|
||||
[[autodoc]] AutoModelForSeq2SeqLM
|
||||
|
||||
## AutoModelForSequenceClassification
|
||||
|
||||
[[autodoc]] AutoModelForSequenceClassification
|
||||
|
||||
## AutoModelForMultipleChoice
|
||||
|
||||
[[autodoc]] AutoModelForMultipleChoice
|
||||
|
||||
## AutoModelForNextSentencePrediction
|
||||
|
||||
[[autodoc]] AutoModelForNextSentencePrediction
|
||||
|
||||
## AutoModelForTokenClassification
|
||||
|
||||
[[autodoc]] AutoModelForTokenClassification
|
||||
|
||||
## AutoModelForQuestionAnswering
|
||||
|
||||
[[autodoc]] AutoModelForQuestionAnswering
|
||||
|
||||
## AutoModelForTableQuestionAnswering
|
||||
|
||||
[[autodoc]] AutoModelForTableQuestionAnswering
|
||||
|
||||
## AutoModelForImageClassification
|
||||
|
||||
[[autodoc]] AutoModelForImageClassification
|
||||
|
||||
## AutoModelForVision2Seq
|
||||
|
||||
[[autodoc]] AutoModelForVision2Seq
|
||||
|
||||
## AutoModelForAudioClassification
|
||||
|
||||
[[autodoc]] AutoModelForAudioClassification
|
||||
|
||||
## AutoModelForAudioFrameClassification
|
||||
|
||||
[[autodoc]] AutoModelForAudioFrameClassification
|
||||
|
||||
## AutoModelForCTC
|
||||
|
||||
[[autodoc]] AutoModelForCTC
|
||||
|
||||
## AutoModelForSpeechSeq2Seq
|
||||
|
||||
[[autodoc]] AutoModelForSpeechSeq2Seq
|
||||
|
||||
## AutoModelForAudioXVector
|
||||
|
||||
[[autodoc]] AutoModelForAudioXVector
|
||||
|
||||
## AutoModelForMaskedImageModeling
|
||||
|
||||
[[autodoc]] AutoModelForMaskedImageModeling
|
||||
|
||||
## AutoModelForObjectDetection
|
||||
|
||||
[[autodoc]] AutoModelForObjectDetection
|
||||
|
||||
## AutoModelForImageSegmentation
|
||||
|
||||
[[autodoc]] AutoModelForImageSegmentation
|
||||
|
||||
## AutoModelForSemanticSegmentation
|
||||
|
||||
[[autodoc]] AutoModelForSemanticSegmentation
|
||||
|
||||
## AutoModelForInstanceSegmentation
|
||||
|
||||
[[autodoc]] AutoModelForInstanceSegmentation
|
||||
|
||||
## TFAutoModel
|
||||
|
||||
[[autodoc]] TFAutoModel
|
||||
|
||||
## TFAutoModelForPreTraining
|
||||
|
||||
[[autodoc]] TFAutoModelForPreTraining
|
||||
|
||||
## TFAutoModelForCausalLM
|
||||
|
||||
[[autodoc]] TFAutoModelForCausalLM
|
||||
|
||||
## TFAutoModelForImageClassification
|
||||
|
||||
[[autodoc]] TFAutoModelForImageClassification
|
||||
|
||||
## TFAutoModelForMaskedLM
|
||||
|
||||
[[autodoc]] TFAutoModelForMaskedLM
|
||||
|
||||
## TFAutoModelForSeq2SeqLM
|
||||
|
||||
[[autodoc]] TFAutoModelForSeq2SeqLM
|
||||
|
||||
## TFAutoModelForSequenceClassification
|
||||
|
||||
[[autodoc]] TFAutoModelForSequenceClassification
|
||||
|
||||
## TFAutoModelForMultipleChoice
|
||||
|
||||
[[autodoc]] TFAutoModelForMultipleChoice
|
||||
|
||||
## TFAutoModelForTableQuestionAnswering
|
||||
|
||||
[[autodoc]] TFAutoModelForTableQuestionAnswering
|
||||
|
||||
## TFAutoModelForTokenClassification
|
||||
|
||||
[[autodoc]] TFAutoModelForTokenClassification
|
||||
|
||||
## TFAutoModelForQuestionAnswering
|
||||
|
||||
[[autodoc]] TFAutoModelForQuestionAnswering
|
||||
|
||||
## TFAutoModelForVision2Seq
|
||||
|
||||
[[autodoc]] TFAutoModelForVision2Seq
|
||||
|
||||
## TFAutoModelForSpeechSeq2Seq
|
||||
|
||||
[[autodoc]] TFAutoModelForSpeechSeq2Seq
|
||||
|
||||
## FlaxAutoModel
|
||||
|
||||
[[autodoc]] FlaxAutoModel
|
||||
|
||||
## FlaxAutoModelForCausalLM
|
||||
|
||||
[[autodoc]] FlaxAutoModelForCausalLM
|
||||
|
||||
## FlaxAutoModelForPreTraining
|
||||
|
||||
[[autodoc]] FlaxAutoModelForPreTraining
|
||||
|
||||
## FlaxAutoModelForMaskedLM
|
||||
|
||||
[[autodoc]] FlaxAutoModelForMaskedLM
|
||||
|
||||
## FlaxAutoModelForSeq2SeqLM
|
||||
|
||||
[[autodoc]] FlaxAutoModelForSeq2SeqLM
|
||||
|
||||
## FlaxAutoModelForSequenceClassification
|
||||
|
||||
[[autodoc]] FlaxAutoModelForSequenceClassification
|
||||
|
||||
## FlaxAutoModelForQuestionAnswering
|
||||
|
||||
[[autodoc]] FlaxAutoModelForQuestionAnswering
|
||||
|
||||
## FlaxAutoModelForTokenClassification
|
||||
|
||||
[[autodoc]] FlaxAutoModelForTokenClassification
|
||||
|
||||
## FlaxAutoModelForMultipleChoice
|
||||
|
||||
[[autodoc]] FlaxAutoModelForMultipleChoice
|
||||
|
||||
## FlaxAutoModelForNextSentencePrediction
|
||||
|
||||
[[autodoc]] FlaxAutoModelForNextSentencePrediction
|
||||
|
||||
## FlaxAutoModelForImageClassification
|
||||
|
||||
[[autodoc]] FlaxAutoModelForImageClassification
|
||||
|
||||
## FlaxAutoModelForVision2Seq
|
||||
|
||||
[[autodoc]] FlaxAutoModelForVision2Seq
|
||||
docs/source/en/model_doc/bart.mdx (new file, 159 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# BART
|
||||
|
||||
**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign
|
||||
@patrickvonplaten
|
||||
|
||||
## Overview
|
||||
|
||||
The Bart model was proposed in [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation,
|
||||
Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan
|
||||
Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer on 29 Oct, 2019.
|
||||
|
||||
According to the abstract,
|
||||
|
||||
- Bart uses a standard seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a
|
||||
left-to-right decoder (like GPT).
|
||||
- The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme,
|
||||
where spans of text are replaced with a single mask token.
|
||||
- BART is particularly effective when fine tuned for text generation but also works well for comprehension tasks. It
|
||||
matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, achieves new
|
||||
state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains
|
||||
of up to 6 ROUGE.
|
||||
|
||||
This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The Authors' code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/bart).
|
||||
|
||||
|
||||
### Examples
|
||||
|
||||
- Examples and scripts for fine-tuning BART and other models for sequence to sequence tasks can be found in
|
||||
[examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md).
|
||||
- An example of how to train [`BartForConditionalGeneration`] with a Hugging Face `datasets`
|
||||
object can be found in this [forum discussion](https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904).
|
||||
- [Distilled checkpoints](https://huggingface.co/models?search=distilbart) are described in this [paper](https://arxiv.org/abs/2010.13002).
|
||||
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
- Bart doesn't use `token_type_ids` for sequence classification. Use [`BartTokenizer`] or
|
||||
[`~BartTokenizer.encode`] to get the proper splitting.
|
||||
- The forward pass of [`BartModel`] will create the `decoder_input_ids` if they are not passed.
|
||||
This is different than some other modeling APIs. A typical use case of this feature is mask filling.
|
||||
- Model predictions are intended to be identical to the original implementation when
|
||||
`forced_bos_token_id=0`. This only works, however, if the string you pass to
|
||||
[`fairseq.encode`] starts with a space.
|
||||
- [`~generation_utils.GenerationMixin.generate`] should be used for conditional generation tasks like
summarization; see the example in that method's docstring and the sketch after this list.
|
||||
- Models that load the *facebook/bart-large-cnn* weights will not have a `mask_token_id`, or be able to perform
|
||||
mask-filling tasks.
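As a rough summarization sketch with *facebook/bart-large-cnn* (not the authors' reference script, just an illustration):

```python
from transformers import BartForConditionalGeneration, BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")

article = "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions."
inputs = tokenizer([article], max_length=1024, truncation=True, return_tensors="pt")

summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=60)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```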
|
||||
|
||||
## Mask Filling
|
||||
|
||||
The `facebook/bart-base` and `facebook/bart-large` checkpoints can be used to fill multi-token masks.
|
||||
|
||||
```python
|
||||
from transformers import BartForConditionalGeneration, BartTokenizer
|
||||
|
||||
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0)
|
||||
tok = BartTokenizer.from_pretrained("facebook/bart-large")
|
||||
example_english_phrase = "UN Chief Says There Is No <mask> in Syria"
|
||||
batch = tok(example_english_phrase, return_tensors="pt")
|
||||
generated_ids = model.generate(batch["input_ids"])
|
||||
assert tok.batch_decode(generated_ids, skip_special_tokens=True) == [
|
||||
"UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria"
|
||||
]
|
||||
```
|
||||
|
||||
## BartConfig
|
||||
|
||||
[[autodoc]] BartConfig
|
||||
- all
|
||||
|
||||
## BartTokenizer
|
||||
|
||||
[[autodoc]] BartTokenizer
|
||||
- all
|
||||
|
||||
## BartTokenizerFast
|
||||
|
||||
[[autodoc]] BartTokenizerFast
|
||||
- all
|
||||
|
||||
## BartModel
|
||||
|
||||
[[autodoc]] BartModel
|
||||
- forward
|
||||
|
||||
## BartForConditionalGeneration
|
||||
|
||||
[[autodoc]] BartForConditionalGeneration
|
||||
- forward
|
||||
|
||||
## BartForSequenceClassification
|
||||
|
||||
[[autodoc]] BartForSequenceClassification
|
||||
- forward
|
||||
|
||||
## BartForQuestionAnswering
|
||||
|
||||
[[autodoc]] BartForQuestionAnswering
|
||||
- forward
|
||||
|
||||
## BartForCausalLM
|
||||
|
||||
[[autodoc]] BartForCausalLM
|
||||
- forward
|
||||
|
||||
## TFBartModel
|
||||
|
||||
[[autodoc]] TFBartModel
|
||||
- call
|
||||
|
||||
## TFBartForConditionalGeneration
|
||||
|
||||
[[autodoc]] TFBartForConditionalGeneration
|
||||
- call
|
||||
|
||||
## FlaxBartModel
|
||||
|
||||
[[autodoc]] FlaxBartModel
|
||||
- __call__
|
||||
- encode
|
||||
- decode
|
||||
|
||||
## FlaxBartForConditionalGeneration
|
||||
|
||||
[[autodoc]] FlaxBartForConditionalGeneration
|
||||
- __call__
|
||||
- encode
|
||||
- decode
|
||||
|
||||
## FlaxBartForSequenceClassification
|
||||
|
||||
[[autodoc]] FlaxBartForSequenceClassification
|
||||
- __call__
|
||||
- encode
|
||||
- decode
|
||||
|
||||
## FlaxBartForQuestionAnswering
|
||||
|
||||
[[autodoc]] FlaxBartForQuestionAnswering
|
||||
- __call__
|
||||
- encode
|
||||
- decode
|
||||
|
||||
## FlaxBartForCausalLM
|
||||
|
||||
[[autodoc]] FlaxBartForCausalLM
|
||||
- __call__
|
||||
docs/source/en/model_doc/barthez.mdx (new file, 50 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# BARThez
|
||||
|
||||
## Overview
|
||||
|
||||
The BARThez model was proposed in [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct,
|
||||
2020.
|
||||
|
||||
The abstract of the paper:
|
||||
|
||||
|
||||
*Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing
|
||||
(NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language
|
||||
understanding tasks. While there are some notable exceptions, most of the available models and research have been
|
||||
conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language
|
||||
(to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research
|
||||
that we adapted to suit BART's perturbation schemes. Unlike already existing BERT-based French language models such as
|
||||
CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also
|
||||
its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel
|
||||
summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already
|
||||
pretrained multilingual BART on BARThez's corpus, and we show that the resulting model, which we call mBARTHez,
|
||||
provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.*
|
||||
|
||||
This model was contributed by [moussakam](https://huggingface.co/moussakam). The Authors' code can be found [here](https://github.com/moussaKam/BARThez).
|
||||
|
||||
|
||||
### Examples
|
||||
|
||||
- BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check:
|
||||
[examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md).
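A small loading sketch (assuming *moussaKam/barthez* is the released checkpoint name):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")
model = AutoModelForSeq2SeqLM.from_pretrained("moussaKam/barthez")

# Fill a masked span, BART-style
inputs = tokenizer("Paris est la capitale de la <mask>.", return_tensors="pt")
outputs = model.generate(**inputs)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```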
|
||||
|
||||
|
||||
## BarthezTokenizer
|
||||
|
||||
[[autodoc]] BarthezTokenizer
|
||||
|
||||
## BarthezTokenizerFast
|
||||
|
||||
[[autodoc]] BarthezTokenizerFast
|
||||
docs/source/en/model_doc/bartpho.mdx (new file, 82 lines)
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# BARTpho
|
||||
|
||||
## Overview
|
||||
|
||||
The BARTpho model was proposed in [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We present BARTpho with two versions -- BARTpho_word and BARTpho_syllable -- the first public large-scale monolingual
|
||||
sequence-to-sequence models pre-trained for Vietnamese. Our BARTpho uses the "large" architecture and pre-training
|
||||
scheme of the sequence-to-sequence denoising model BART, thus especially suitable for generative NLP tasks. Experiments
|
||||
on a downstream task of Vietnamese text summarization show that in both automatic and human evaluations, our BARTpho
|
||||
outperforms the strong baseline mBART and improves the state-of-the-art. We release BARTpho to facilitate future
|
||||
research and applications of generative Vietnamese NLP tasks.*
|
||||
|
||||
Example of use:
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoModel, AutoTokenizer
|
||||
|
||||
>>> bartpho = AutoModel.from_pretrained("vinai/bartpho-syllable")
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
|
||||
|
||||
>>> line = "Chúng tôi là những nghiên cứu viên."
|
||||
|
||||
>>> input_ids = tokenizer(line, return_tensors="pt")
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... features = bartpho(**input_ids) # Models outputs are now tuples
|
||||
|
||||
>>> # With TensorFlow 2.0+:
|
||||
>>> from transformers import TFAutoModel
|
||||
|
||||
>>> bartpho = TFAutoModel.from_pretrained("vinai/bartpho-syllable")
|
||||
>>> input_ids = tokenizer(line, return_tensors="tf")
|
||||
>>> features = bartpho(**input_ids)
|
||||
```
|
||||
|
||||
Tips:
|
||||
|
||||
- Following mBART, BARTpho uses the "large" architecture of BART with an additional layer-normalization layer on top of
|
||||
both the encoder and decoder. Thus, usage examples in the [documentation of BART](bart), when adapting to use
|
||||
with BARTpho, should be adjusted by replacing the BART-specialized classes with the mBART-specialized counterparts.
|
||||
For example:
|
||||
|
||||
```python
|
||||
>>> from transformers import MBartForConditionalGeneration
|
||||
|
||||
>>> bartpho = MBartForConditionalGeneration.from_pretrained("vinai/bartpho-syllable")
|
||||
>>> TXT = "Chúng tôi là <mask> nghiên cứu viên."
|
||||
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
|
||||
>>> logits = bartpho(input_ids).logits
|
||||
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
|
||||
>>> probs = logits[0, masked_index].softmax(dim=0)
|
||||
>>> values, predictions = probs.topk(5)
|
||||
>>> print(tokenizer.decode(predictions).split())
|
||||
```
|
||||
|
||||
- This implementation is only for tokenization: "monolingual_vocab_file" consists of Vietnamese-specialized types
|
||||
extracted from the pre-trained SentencePiece model "vocab_file" that is available from the multilingual XLM-RoBERTa.
|
||||
Other languages, if employing this pre-trained multilingual SentencePiece model "vocab_file" for subword
|
||||
segmentation, can reuse BartphoTokenizer with their own language-specialized "monolingual_vocab_file".
|
||||
|
||||
This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BARTpho).
|
||||
|
||||
## BartphoTokenizer
|
||||
|
||||
[[autodoc]] BartphoTokenizer
|
||||
docs/source/en/model_doc/beit.mdx (new file, 114 lines)
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# BEiT
|
||||
|
||||
## Overview
|
||||
|
||||
The BEiT model was proposed in [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by
|
||||
Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of
Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class
of an image (as done in the [original ViT paper](https://arxiv.org/abs/2010.11929)), BEiT models are pre-trained to
predict visual tokens from the codebook of OpenAI's [DALL-E model](https://arxiv.org/abs/2102.12092) given masked
patches.

The abstract from the paper is the following:

*We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation
from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image
modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image
patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into
visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training
objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we
directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder.
Experimental results on image classification and semantic segmentation show that our model achieves competitive results
with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K,
significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains
86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).*

Tips:

- BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They
  outperform both the [original model (ViT)](vit) as well as [Data-efficient Image Transformers (DeiT)](deit) when fine-tuned on ImageNet-1K and CIFAR-100. You can check out demo notebooks regarding inference as well as
  fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace
  [`ViTFeatureExtractor`] by [`BeitFeatureExtractor`] and
  [`ViTForImageClassification`] by [`BeitForImageClassification`]).
- There's also a demo notebook available which showcases how to combine DALL-E's image tokenizer with BEiT for
  performing masked image modeling. You can find it [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT).
- As the BEiT models expect each image to be of the same size (resolution), one can use
  [`BeitFeatureExtractor`] to resize (or rescale) and normalize images for the model (see the sketch after these tips).
- Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of
  each checkpoint. For example, `microsoft/beit-base-patch16-224` refers to a base-sized architecture with patch
  resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=microsoft/beit).
- The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of
  14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million
  images and 1,000 classes).
- BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the
  relative position bias among the several self-attention layers. During fine-tuning, each layer's relative position
  bias is initialized with the shared relative position bias obtained after pre-training. Note that, if one wants to
  pre-train a model from scratch, one needs to either set the `use_relative_position_bias` or the
  `use_absolute_position_embeddings` attribute of [`BeitConfig`] to `True` in order to add
  position embeddings.
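
As a quick illustration of the resize-and-classify flow described in the tips, here is a minimal sketch using the
`microsoft/beit-base-patch16-224` checkpoint mentioned above. The image URL is only an example input, and the snippet
is meant as an orientation rather than an official recipe:

```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BeitFeatureExtractor, BeitForImageClassification

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image, swap in your own
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> feature_extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224")
>>> model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

>>> # the feature extractor resizes/rescales and normalizes; the model returns ImageNet-1k logits
>>> inputs = feature_extractor(images=image, return_tensors="pt")
>>> logits = model(**inputs).logits
>>> print(model.config.id2label[logits.argmax(-1).item()])
```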

This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was
contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit).


## BEiT specific outputs

[[autodoc]] models.beit.modeling_beit.BeitModelOutputWithPooling

[[autodoc]] models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling

## BeitConfig

[[autodoc]] BeitConfig

## BeitFeatureExtractor

[[autodoc]] BeitFeatureExtractor
    - __call__

## BeitModel

[[autodoc]] BeitModel
    - forward

## BeitForMaskedImageModeling

[[autodoc]] BeitForMaskedImageModeling
    - forward

## BeitForImageClassification

[[autodoc]] BeitForImageClassification
    - forward

## BeitForSemanticSegmentation

[[autodoc]] BeitForSemanticSegmentation
    - forward

## FlaxBeitModel

[[autodoc]] FlaxBeitModel
    - __call__

## FlaxBeitForMaskedImageModeling

[[autodoc]] FlaxBeitForMaskedImageModeling
    - __call__

## FlaxBeitForImageClassification

[[autodoc]] FlaxBeitForImageClassification
    - __call__
docs/source/en/model_doc/bert-generation.mdx (new file, 104 lines)
@ -0,0 +1,104 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BertGeneration

## Overview

The BertGeneration model is a BERT model that can be leveraged for sequence-to-sequence tasks using
[`EncoderDecoderModel`] as proposed in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.

The abstract from the paper is the following:

*Unsupervised pretraining of large neural models has recently revolutionized Natural Language Processing. By
warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple
benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language
Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We
developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT,
GPT-2 and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both
encoder and decoder, with these checkpoints. Our models result in new state-of-the-art results on Machine Translation,
Text Summarization, Sentence Splitting, and Sentence Fusion.*

Usage:

- The model can be used in combination with the [`EncoderDecoderModel`] to leverage two pretrained
  BERT checkpoints for subsequent fine-tuning.

```python
>>> from transformers import BertGenerationDecoder, BertGenerationEncoder, BertTokenizer, EncoderDecoderModel

>>> # leverage checkpoints for Bert2Bert model...
>>> # use BERT's cls token as BOS token and sep token as EOS token
>>> encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
>>> # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token
>>> decoder = BertGenerationDecoder.from_pretrained(
...     "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
... )
>>> bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)

>>> # create tokenizer...
>>> tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")

>>> input_ids = tokenizer(
...     "This is a long article to summarize", add_special_tokens=False, return_tensors="pt"
... ).input_ids
>>> labels = tokenizer("This is a short summary", return_tensors="pt").input_ids

>>> # train...
>>> loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss
>>> loss.backward()
```

- Pretrained [`EncoderDecoderModel`] checkpoints are also directly available in the model hub, e.g.:

```python
>>> from transformers import AutoTokenizer, EncoderDecoderModel

>>> # instantiate sentence fusion model
>>> sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse")
>>> tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse")

>>> input_ids = tokenizer(
...     "This is the first sentence. This is the second sentence.", add_special_tokens=False, return_tensors="pt"
... ).input_ids

>>> outputs = sentence_fuser.generate(input_ids)

>>> print(tokenizer.decode(outputs[0]))
```

Tips:

- [`BertGenerationEncoder`] and [`BertGenerationDecoder`] should be used in
  combination with [`EncoderDecoderModel`].
- For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input.
  Therefore, no EOS token should be added to the end of the input.

This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be
found [here](https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder).

## BertGenerationConfig

[[autodoc]] BertGenerationConfig

## BertGenerationTokenizer

[[autodoc]] BertGenerationTokenizer
    - save_vocabulary

## BertGenerationEncoder

[[autodoc]] BertGenerationEncoder
    - forward

## BertGenerationDecoder

[[autodoc]] BertGenerationDecoder
    - forward
docs/source/en/model_doc/bert-japanese.mdx (new file, 74 lines)
@ -0,0 +1,74 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BertJapanese

## Overview

The BERT models trained on Japanese text.

There are models with two different tokenization methods:

- Tokenize with MeCab and WordPiece. This requires some extra dependencies: [fugashi](https://github.com/polm/fugashi), which is a wrapper around [MeCab](https://taku910.github.io/mecab/).
- Tokenize into characters.

To use *MecabTokenizer*, you should `pip install transformers["ja"]` (or `pip install -e .["ja"]` if you install
from source) to install dependencies.

See [details on cl-tohoku repository](https://github.com/cl-tohoku/bert-japanese).

Example of using a model with MeCab and WordPiece tokenization:

```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer

>>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese")
>>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")

>>> ## Input Japanese Text
>>> line = "吾輩は猫である。"

>>> inputs = tokenizer(line, return_tensors="pt")

>>> print(tokenizer.decode(inputs["input_ids"][0]))
[CLS] 吾輩 は 猫 で ある 。 [SEP]

>>> outputs = bertjapanese(**inputs)
```

Example of using a model with Character tokenization:

```python
>>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char")
>>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char")

>>> ## Input Japanese Text
>>> line = "吾輩は猫である。"

>>> inputs = tokenizer(line, return_tensors="pt")

>>> print(tokenizer.decode(inputs["input_ids"][0]))
[CLS] 吾 輩 は 猫 で あ る 。 [SEP]

>>> outputs = bertjapanese(**inputs)
```

Tips:

- This implementation is the same as BERT, except for the tokenization method. Refer to the [documentation of BERT](bert) for more usage examples.

This model was contributed by [cl-tohoku](https://huggingface.co/cl-tohoku).

## BertJapaneseTokenizer

[[autodoc]] BertJapaneseTokenizer
docs/source/en/model_doc/bert.mdx (new file, 197 lines)
@ -0,0 +1,197 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BERT

## Overview

The BERT model was proposed in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a
bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence
prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia.

The abstract from the paper is the following:

*We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations
from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional
representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result,
the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models
for a wide range of tasks, such as question answering and language inference, without substantial task-specific
architecture modifications.*

*BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural
language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI
accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute
improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).*

Tips:

- BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
  the left.
- BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is
  efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. A short masked
  language modeling sketch is shown below.
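
To illustrate the MLM objective mentioned in the tips, here is a minimal masked-token prediction sketch. It assumes
the `bert-base-uncased` checkpoint (an assumption, not referenced elsewhere on this page) and is meant as an
orientation rather than an official recipe:

```python
>>> import torch
>>> from transformers import BertForMaskedLM, BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForMaskedLM.from_pretrained("bert-base-uncased")

>>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> # locate the [MASK] position and take the highest-scoring token for it
>>> mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_id = logits[0, mask_index].argmax(-1)
>>> print(tokenizer.decode(predicted_id))
```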

This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/bert).

## BertConfig

[[autodoc]] BertConfig
    - all

## BertTokenizer

[[autodoc]] BertTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## BertTokenizerFast

[[autodoc]] BertTokenizerFast

## Bert specific outputs

[[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput

[[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput

[[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput

## BertModel

[[autodoc]] BertModel
    - forward

## BertForPreTraining

[[autodoc]] BertForPreTraining
    - forward

## BertLMHeadModel

[[autodoc]] BertLMHeadModel
    - forward

## BertForMaskedLM

[[autodoc]] BertForMaskedLM
    - forward

## BertForNextSentencePrediction

[[autodoc]] BertForNextSentencePrediction
    - forward

## BertForSequenceClassification

[[autodoc]] BertForSequenceClassification
    - forward

## BertForMultipleChoice

[[autodoc]] BertForMultipleChoice
    - forward

## BertForTokenClassification

[[autodoc]] BertForTokenClassification
    - forward

## BertForQuestionAnswering

[[autodoc]] BertForQuestionAnswering
    - forward

## TFBertModel

[[autodoc]] TFBertModel
    - call

## TFBertForPreTraining

[[autodoc]] TFBertForPreTraining
    - call
## TFBertLMHeadModel

[[autodoc]] TFBertLMHeadModel
    - call

## TFBertForMaskedLM

[[autodoc]] TFBertForMaskedLM
    - call

## TFBertForNextSentencePrediction

[[autodoc]] TFBertForNextSentencePrediction
    - call

## TFBertForSequenceClassification

[[autodoc]] TFBertForSequenceClassification
    - call

## TFBertForMultipleChoice

[[autodoc]] TFBertForMultipleChoice
    - call

## TFBertForTokenClassification

[[autodoc]] TFBertForTokenClassification
    - call

## TFBertForQuestionAnswering

[[autodoc]] TFBertForQuestionAnswering
    - call

## FlaxBertModel

[[autodoc]] FlaxBertModel
    - __call__

## FlaxBertForPreTraining

[[autodoc]] FlaxBertForPreTraining
    - __call__

## FlaxBertForMaskedLM

[[autodoc]] FlaxBertForMaskedLM
    - __call__

## FlaxBertForNextSentencePrediction

[[autodoc]] FlaxBertForNextSentencePrediction
    - __call__

## FlaxBertForSequenceClassification

[[autodoc]] FlaxBertForSequenceClassification
    - __call__

## FlaxBertForMultipleChoice

[[autodoc]] FlaxBertForMultipleChoice
    - __call__

## FlaxBertForTokenClassification

[[autodoc]] FlaxBertForTokenClassification
    - __call__

## FlaxBertForQuestionAnswering

[[autodoc]] FlaxBertForQuestionAnswering
    - __call__
docs/source/en/model_doc/bertweet.mdx (new file, 58 lines)
@ -0,0 +1,58 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BERTweet

## Overview

The BERTweet model was proposed in [BERTweet: A pre-trained language model for English Tweets](https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf) by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen.

The abstract from the paper is the following:

*We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having
the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et
al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al.,
2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks:
Part-of-speech tagging, Named-entity recognition and text classification.*

Example of use:

```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer

>>> bertweet = AutoModel.from_pretrained("vinai/bertweet-base")

>>> # For transformers v4.x+:
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)

>>> # For transformers v3.x:
>>> # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")

>>> # INPUT TWEET IS ALREADY NORMALIZED!
>>> line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"

>>> input_ids = torch.tensor([tokenizer.encode(line)])

>>> with torch.no_grad():
...     features = bertweet(input_ids)  # Model outputs are now tuples

>>> # With TensorFlow 2.0+:
>>> # from transformers import TFAutoModel
>>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")
```

This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BERTweet).

## BertweetTokenizer

[[autodoc]] BertweetTokenizer
docs/source/en/model_doc/big_bird.mdx (new file, 146 lines)
@ -0,0 +1,146 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BigBird

## Overview

The BigBird model was proposed in [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by
Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon,
Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention
based transformer which extends Transformer-based models, such as BERT, to much longer sequences. In addition to sparse
attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it
has been shown that applying sparse, global, and random attention approximates full attention, while being
computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context,
BigBird has shown improved performance on various long document NLP tasks, such as question answering and
summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP.
Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence
length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that
reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and
is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our
theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire
sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to
8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context,
BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also
propose novel applications to genomics data.*

Tips:

- For an in-detail explanation on how BigBird's attention works, see [this blog post](https://huggingface.co/blog/big-bird).
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using
  **original_full** is advised as there is no benefit in using **block_sparse** attention (see the sketch below).
- The code currently uses a window size of 3 blocks and 2 global blocks.
- Sequence length must be divisible by the block size.
- The current implementation supports only **ITC**.
- The current implementation doesn't support **num_random_blocks = 0**.
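
The attention settings mentioned in these tips are exposed through [`BigBirdConfig`] and can be overridden when
loading a checkpoint. A minimal sketch, assuming the `google/bigbird-roberta-base` checkpoint (an assumption, not
referenced elsewhere on this page):

```python
>>> from transformers import BigBirdModel

>>> # the default configuration uses block_sparse attention
>>> model = BigBirdModel.from_pretrained("google/bigbird-roberta-base")
>>> # switch to full attention, advised for sequences shorter than 1024 tokens
>>> model = BigBirdModel.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
>>> # tweak the block size and the number of random blocks used by block_sparse attention
>>> model = BigBirdModel.from_pretrained("google/bigbird-roberta-base", block_size=16, num_random_blocks=2)
```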

This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta). The original code can be found
[here](https://github.com/google-research/bigbird).

## BigBirdConfig

[[autodoc]] BigBirdConfig

## BigBirdTokenizer

[[autodoc]] BigBirdTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## BigBirdTokenizerFast

[[autodoc]] BigBirdTokenizerFast

## BigBird specific outputs

[[autodoc]] models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput

## BigBirdModel

[[autodoc]] BigBirdModel
    - forward

## BigBirdForPreTraining

[[autodoc]] BigBirdForPreTraining
    - forward

## BigBirdForCausalLM

[[autodoc]] BigBirdForCausalLM
    - forward

## BigBirdForMaskedLM

[[autodoc]] BigBirdForMaskedLM
    - forward

## BigBirdForSequenceClassification

[[autodoc]] BigBirdForSequenceClassification
    - forward

## BigBirdForMultipleChoice

[[autodoc]] BigBirdForMultipleChoice
    - forward

## BigBirdForTokenClassification

[[autodoc]] BigBirdForTokenClassification
    - forward

## BigBirdForQuestionAnswering

[[autodoc]] BigBirdForQuestionAnswering
    - forward

## FlaxBigBirdModel

[[autodoc]] FlaxBigBirdModel
    - __call__

## FlaxBigBirdForPreTraining

[[autodoc]] FlaxBigBirdForPreTraining
    - __call__

## FlaxBigBirdForMaskedLM

[[autodoc]] FlaxBigBirdForMaskedLM
    - __call__

## FlaxBigBirdForSequenceClassification

[[autodoc]] FlaxBigBirdForSequenceClassification
    - __call__

## FlaxBigBirdForMultipleChoice

[[autodoc]] FlaxBigBirdForMultipleChoice
    - __call__

## FlaxBigBirdForTokenClassification

[[autodoc]] FlaxBigBirdForTokenClassification
    - __call__

## FlaxBigBirdForQuestionAnswering

[[autodoc]] FlaxBigBirdForQuestionAnswering
    - __call__
docs/source/en/model_doc/bigbird_pegasus.mdx (new file, 81 lines)
@ -0,0 +1,81 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BigBirdPegasus

## Overview

The BigBird model was proposed in [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by
Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon,
Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention
based transformer which extends Transformer-based models, such as BERT, to much longer sequences. In addition to sparse
attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it
has been shown that applying sparse, global, and random attention approximates full attention, while being
computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context,
BigBird has shown improved performance on various long document NLP tasks, such as question answering and
summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP.
Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence
length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that
reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and
is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our
theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire
sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to
8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context,
BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also
propose novel applications to genomics data.*

Tips:

- For an in-detail explanation on how BigBird's attention works, see [this blog post](https://huggingface.co/blog/big-bird).
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using
  **original_full** is advised as there is no benefit in using **block_sparse** attention.
- The code currently uses a window size of 3 blocks and 2 global blocks.
- Sequence length must be divisible by the block size.
- The current implementation supports only **ITC**.
- The current implementation doesn't support **num_random_blocks = 0**.
- BigBirdPegasus uses the [PegasusTokenizer](https://github.com/huggingface/transformers/blob/main/src/transformers/models/pegasus/tokenization_pegasus.py) (see the sketch below).
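
As a sketch of how the tokenizer and model are combined for long-document summarization, assuming the
`google/bigbird-pegasus-large-arxiv` checkpoint (an assumption; other BigBirdPegasus checkpoints on the hub follow the
same pattern):

```python
>>> from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration

>>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv")

>>> text = "Replace me with the (long) scientific document you want to summarize."
>>> inputs = tokenizer(text, return_tensors="pt")

>>> # beam search over the encoder output; max_length caps the summary length
>>> summary_ids = model.generate(**inputs, num_beams=4, max_length=64)
>>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```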

The original code can be found [here](https://github.com/google-research/bigbird).

## BigBirdPegasusConfig

[[autodoc]] BigBirdPegasusConfig
    - all

## BigBirdPegasusModel

[[autodoc]] BigBirdPegasusModel
    - forward

## BigBirdPegasusForConditionalGeneration

[[autodoc]] BigBirdPegasusForConditionalGeneration
    - forward

## BigBirdPegasusForSequenceClassification

[[autodoc]] BigBirdPegasusForSequenceClassification
    - forward

## BigBirdPegasusForQuestionAnswering

[[autodoc]] BigBirdPegasusForQuestionAnswering
    - forward

## BigBirdPegasusForCausalLM

[[autodoc]] BigBirdPegasusForCausalLM
    - forward
docs/source/en/model_doc/blenderbot-small.mdx (new file, 95 lines)
@ -0,0 +1,95 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Blenderbot Small

Note that [`BlenderbotSmallModel`] and
[`BlenderbotSmallForConditionalGeneration`] are only used in combination with the checkpoint
[facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M). Larger Blenderbot checkpoints should
instead be used with [`BlenderbotModel`] and
[`BlenderbotForConditionalGeneration`].

## Overview

The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu,
Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.

The abstract of the paper is the following:

*Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that
scaling neural models in the number of parameters and the size of the data they are trained on gives improved results,
we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of
skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to
their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent
persona. We show that large scale models can learn these skills when given appropriate training data and choice of
generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models
and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn
dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
failure cases of our models.*

This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be
found [here](https://github.com/facebookresearch/ParlAI).
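
Usage mirrors the larger Blenderbot checkpoints; a minimal conversational sketch with the
[facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M) checkpoint referenced above could look as
follows (illustrative only, not an official recipe):

```python
>>> from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

>>> mname = "facebook/blenderbot-90M"
>>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
>>> tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)

>>> UTTERANCE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([UTTERANCE], return_tensors="pt")

>>> # generate a reply and strip the special tokens before printing
>>> reply_ids = model.generate(**inputs)
>>> print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))
```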

## BlenderbotSmallConfig

[[autodoc]] BlenderbotSmallConfig

## BlenderbotSmallTokenizer

[[autodoc]] BlenderbotSmallTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## BlenderbotSmallTokenizerFast

[[autodoc]] BlenderbotSmallTokenizerFast

## BlenderbotSmallModel

[[autodoc]] BlenderbotSmallModel
    - forward

## BlenderbotSmallForConditionalGeneration

[[autodoc]] BlenderbotSmallForConditionalGeneration
    - forward

## BlenderbotSmallForCausalLM

[[autodoc]] BlenderbotSmallForCausalLM
    - forward

## TFBlenderbotSmallModel

[[autodoc]] TFBlenderbotSmallModel
    - call

## TFBlenderbotSmallForConditionalGeneration

[[autodoc]] TFBlenderbotSmallForConditionalGeneration
    - call

## FlaxBlenderbotSmallModel

[[autodoc]] FlaxBlenderbotSmallModel
    - __call__
    - encode
    - decode

## FlaxBlenderbotSmallForConditionalGeneration

[[autodoc]] FlaxBlenderbotSmallForConditionalGeneration
    - __call__
    - encode
    - decode
docs/source/en/model_doc/blenderbot.mdx (new file, 119 lines)
@ -0,0 +1,119 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Blenderbot

**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title).

## Overview

The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu,
Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.

The abstract of the paper is the following:

*Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that
scaling neural models in the number of parameters and the size of the data they are trained on gives improved results,
we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of
skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to
their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent
persona. We show that large scale models can learn these skills when given appropriate training data and choice of
generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models
and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn
dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
failure cases of our models.*

This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI).


## Implementation Notes

- Blenderbot uses a standard [seq2seq model transformer](https://arxiv.org/pdf/1706.03762.pdf) based architecture.
- Available checkpoints can be found in the [model hub](https://huggingface.co/models?search=blenderbot).
- This is the *default* Blenderbot model class. However, some smaller checkpoints, such as
  `facebook/blenderbot_small_90M`, have a different architecture and consequently should be used with
  [BlenderbotSmall](blenderbot-small).


## Usage

Here is an example of model usage:

```python
>>> from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

>>> mname = "facebook/blenderbot-400M-distill"
>>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)
>>> tokenizer = BlenderbotTokenizer.from_pretrained(mname)
>>> UTTERANCE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
>>> reply_ids = model.generate(**inputs)
>>> print(tokenizer.batch_decode(reply_ids))
["<s> That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?</s>"]
```

## BlenderbotConfig

[[autodoc]] BlenderbotConfig

## BlenderbotTokenizer

[[autodoc]] BlenderbotTokenizer
    - build_inputs_with_special_tokens

## BlenderbotTokenizerFast

[[autodoc]] BlenderbotTokenizerFast
    - build_inputs_with_special_tokens

## BlenderbotModel

See `transformers.BartModel` for arguments to *forward* and *generate*

[[autodoc]] BlenderbotModel
    - forward

## BlenderbotForConditionalGeneration

See [`~transformers.BartForConditionalGeneration`] for arguments to *forward* and *generate*

[[autodoc]] BlenderbotForConditionalGeneration
    - forward

## BlenderbotForCausalLM

[[autodoc]] BlenderbotForCausalLM
    - forward

## TFBlenderbotModel

[[autodoc]] TFBlenderbotModel
    - call

## TFBlenderbotForConditionalGeneration

[[autodoc]] TFBlenderbotForConditionalGeneration
    - call

## FlaxBlenderbotModel

[[autodoc]] FlaxBlenderbotModel
    - __call__
    - encode
    - decode

## FlaxBlenderbotForConditionalGeneration

[[autodoc]] FlaxBlenderbotForConditionalGeneration
    - __call__
    - encode
    - decode
@ -1,22 +1,20 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# BORT

## Overview

The BORT model was proposed in [Optimal Subarchitecture Extraction for BERT](https://arxiv.org/abs/2010.10499) by
Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for BERT, which the
authors refer to as "Bort".

@ -34,14 +32,11 @@ absolute, with respect to BERT-large, on multiple public natural language unders

Tips:

- BORT's model architecture is based on BERT, so one can refer to [BERT's documentation page](bert) for the
  model's API as well as usage examples.
- BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to [RoBERTa's documentation page](roberta) for the tokenizer's API as well as usage examples.
- BORT requires a specific fine-tuning algorithm, called [Agora](https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology),
  that is sadly not open-sourced yet. It would be very useful for the community if someone tried to implement the
  algorithm to make BORT fine-tuning work.

This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/alexa/bort/).
docs/source/en/model_doc/byt5.mdx (new file, 86 lines)
@ -0,0 +1,86 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ByT5

## Overview

The ByT5 model was presented in [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir
Kale, Adam Roberts, Colin Raffel.

The abstract from the paper is the following:

*Most widely-used pre-trained language models operate on sequences of tokens corresponding to word or subword units.
Encoding text as a sequence of tokens requires a tokenizer, which is typically created as an independent artifact from
the model. Token-free models that instead operate directly on raw text (bytes or characters) have many benefits: they
can process text in any language out of the box, they are more robust to noise, and they minimize technical debt by
removing complex and error-prone text preprocessing pipelines. Since byte or character sequences are longer than token
sequences, past work on token-free models has often introduced new model architectures designed to amortize the cost of
operating directly on raw text. In this paper, we show that a standard Transformer architecture can be used with
minimal modifications to process byte sequences. We carefully characterize the trade-offs in terms of parameter count,
training FLOPs, and inference speed, and show that byte-level models are competitive with their token-level
counterparts. We also demonstrate that byte-level models are significantly more robust to noise and perform better on
tasks that are sensitive to spelling and pronunciation. As part of our contribution, we release a new set of
pre-trained byte-level Transformer models based on the T5 architecture, as well as all code and data used in our
experiments.*

This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be
found [here](https://github.com/google-research/byt5).

ByT5's architecture is based on the T5v1.1 model, so one can refer to [T5v1.1's documentation page](t5v1.1). They
only differ in how inputs should be prepared for the model, see the code examples below.

Since ByT5 was pre-trained in an unsupervised fashion, there's no real advantage to using a task prefix during
single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix.


### Example

ByT5 works on raw UTF-8 bytes, so it can be used without a tokenizer:

```python
from transformers import T5ForConditionalGeneration
import torch

model = T5ForConditionalGeneration.from_pretrained("google/byt5-small")

input_ids = torch.tensor([list("Life is like a box of chocolates.".encode("utf-8"))]) + 3  # add 3 for special tokens
labels = (
    torch.tensor([list("La vie est comme une boîte de chocolat.".encode("utf-8"))]) + 3
)  # add 3 for special tokens

loss = model(input_ids, labels=labels).loss  # forward pass
```

For batched inference and training, it is however recommended to use the tokenizer:

```python
from transformers import T5ForConditionalGeneration, AutoTokenizer

model = T5ForConditionalGeneration.from_pretrained("google/byt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/byt5-small")

model_inputs = tokenizer(
    ["Life is like a box of chocolates.", "Today is Monday."], padding="longest", return_tensors="pt"
)
labels = tokenizer(
    ["La vie est comme une boîte de chocolat.", "Aujourd'hui c'est lundi."], padding="longest", return_tensors="pt"
).input_ids

loss = model(**model_inputs, labels=labels).loss  # forward pass
```

## ByT5Tokenizer

[[autodoc]] ByT5Tokenizer

See [`ByT5Tokenizer`] for all details.
docs/source/en/model_doc/camembert.mdx (new file, 110 lines)
@ -0,0 +1,110 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# CamemBERT

## Overview

The CamemBERT model was proposed in [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by
Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suárez, Yoann Dupont, Laurent Romary, Éric Villemonte de la
Clergerie, Djamé Seddah, and Benoît Sagot. It is based on Facebook's RoBERTa model released in 2019. It is a model
trained on 138GB of French text.

The abstract from the paper is the following:

*Pretrained language models are now ubiquitous in Natural Language Processing. Despite their success, most available
models have either been trained on English data or on the concatenation of data in multiple languages. This makes
practical use of such models --in all languages except English-- very limited. Aiming to address this issue for French,
we release CamemBERT, a French version of the Bi-directional Encoders for Transformers (BERT). We measure the
performance of CamemBERT compared to multilingual models in multiple downstream tasks, namely part-of-speech tagging,
dependency parsing, named-entity recognition, and natural language inference. CamemBERT improves the state of the art
for most of the tasks considered. We release the pretrained model for CamemBERT hoping to foster research and
downstream applications for French NLP.*

Tips:

- This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples
  as well as the information relative to the inputs and outputs.

This model was contributed by [camembert](https://huggingface.co/camembert). The original code can be found [here](https://camembert-model.fr/).
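
Because CamemBERT exposes the same API as RoBERTa, a quick way to try a checkpoint is the fill-mask pipeline. The
sketch below assumes the `camembert-base` checkpoint (an assumption, not referenced elsewhere on this page) and is
only meant as an illustration:

```python
>>> from transformers import pipeline

>>> # fill-mask pipeline; CamemBERT uses "<mask>" as its mask token
>>> camembert_fill_mask = pipeline("fill-mask", model="camembert-base", tokenizer="camembert-base")
>>> results = camembert_fill_mask("Le camembert est <mask> :)")
>>> print([result["token_str"] for result in results])
```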

## CamembertConfig

[[autodoc]] CamembertConfig

## CamembertTokenizer

[[autodoc]] CamembertTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## CamembertTokenizerFast

[[autodoc]] CamembertTokenizerFast

## CamembertModel

[[autodoc]] CamembertModel

## CamembertForCausalLM

[[autodoc]] CamembertForCausalLM

## CamembertForMaskedLM

[[autodoc]] CamembertForMaskedLM

## CamembertForSequenceClassification

[[autodoc]] CamembertForSequenceClassification

## CamembertForMultipleChoice

[[autodoc]] CamembertForMultipleChoice

## CamembertForTokenClassification

[[autodoc]] CamembertForTokenClassification

## CamembertForQuestionAnswering

[[autodoc]] CamembertForQuestionAnswering

## TFCamembertModel

[[autodoc]] TFCamembertModel

## TFCamembertForCausalLM

[[autodoc]] TFCamembertForCausalLM

## TFCamembertForMaskedLM

[[autodoc]] TFCamembertForMaskedLM

## TFCamembertForSequenceClassification

[[autodoc]] TFCamembertForSequenceClassification

## TFCamembertForMultipleChoice

[[autodoc]] TFCamembertForMultipleChoice

## TFCamembertForTokenClassification

[[autodoc]] TFCamembertForTokenClassification

## TFCamembertForQuestionAnswering

[[autodoc]] TFCamembertForQuestionAnswering
133
docs/source/en/model_doc/canine.mdx
Normal file
133
docs/source/en/model_doc/canine.mdx
Normal file
@ -0,0 +1,133 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# CANINE
|
||||
|
||||
## Overview
|
||||
|
||||
The CANINE model was proposed in [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language
|
||||
Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. It's
|
||||
among the first papers that trains a Transformer without using an explicit tokenization step (such as Byte Pair
|
||||
Encoding (BPE), WordPiece or SentencePiece). Instead, the model is trained directly at a Unicode character-level.
|
||||
Training at a character-level inevitably comes with a longer sequence length, which CANINE solves with an efficient
|
||||
downsampling strategy, before applying a deep Transformer encoder.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Pipelined NLP systems have largely been superseded by end-to-end neural modeling, yet nearly all commonly-used models
|
||||
still require an explicit tokenization step. While recent tokenization approaches based on data-derived subword
|
||||
lexicons are less brittle than manually engineered tokenizers, these techniques are not equally suited to all
|
||||
languages, and the use of any fixed vocabulary may limit a model's ability to adapt. In this paper, we present CANINE,
|
||||
a neural encoder that operates directly on character sequences, without explicit tokenization or vocabulary, and a
|
||||
pre-training strategy that operates either directly on characters or optionally uses subwords as a soft inductive bias.
|
||||
To use its finer-grained input effectively and efficiently, CANINE combines downsampling, which reduces the input
|
||||
sequence length, with a deep transformer stack, which encodes context. CANINE outperforms a comparable mBERT model by
|
||||
2.8 F1 on TyDi QA, a challenging multilingual benchmark, despite having 28% fewer model parameters.*
|
||||
|
||||
Tips:

- CANINE uses no fewer than 3 Transformer encoders internally: 2 "shallow" encoders (which only consist of a single
  layer) and 1 "deep" encoder (which is a regular BERT encoder). First, a "shallow" encoder is used to contextualize
  the character embeddings, using local attention. Next, after downsampling, a "deep" encoder is applied. Finally,
  after upsampling, a "shallow" encoder is used to create the final character embeddings. Details regarding up- and
  downsampling can be found in the paper.
- CANINE uses a max sequence length of 2048 characters by default. One can use [`CanineTokenizer`]
  to prepare text for the model.
- Classification can be done by placing a linear layer on top of the final hidden state of the special [CLS] token
  (which has a predefined Unicode code point). For token classification tasks however, the downsampled sequence of
  tokens needs to be upsampled again to match the length of the original character sequence (which is 2048). The
  details for this can be found in the paper. A minimal fine-tuning sketch is shown after this list.
- Models:

    - [google/canine-c](https://huggingface.co/google/canine-c): Pre-trained with autoregressive character loss,
      12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB).
    - [google/canine-s](https://huggingface.co/google/canine-s): Pre-trained with subword loss, 12-layer,
      768-hidden, 12-heads, 121M parameters (size ~500 MB).
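
As a minimal sketch of the sequence classification setup described above, one can load
[`CanineForSequenceClassification`], which places a freshly initialized linear layer on top of the [CLS] hidden state
(the binary head and the dummy label below are illustrative assumptions, not part of the original model):

```python
>>> import torch
>>> from transformers import CanineTokenizer, CanineForSequenceClassification

>>> tokenizer = CanineTokenizer.from_pretrained("google/canine-c")
>>> # the classification head on top of the [CLS] hidden state is randomly initialized
>>> model = CanineForSequenceClassification.from_pretrained("google/canine-c", num_labels=2)

>>> encoding = tokenizer(["hello world"], padding="longest", return_tensors="pt")
>>> labels = torch.tensor([1])  # dummy label, for illustration only

>>> outputs = model(**encoding, labels=labels)
>>> loss, logits = outputs.loss, outputs.logits
```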

This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/language/tree/master/language/canine).

### Example

CANINE works on raw characters, so it can be used without a tokenizer:

```python
>>> from transformers import CanineModel
>>> import torch

>>> model = CanineModel.from_pretrained("google/canine-c")  # model pre-trained with autoregressive character loss

>>> text = "hello world"
>>> # use Python's built-in ord() function to turn each character into its unicode code point id
>>> input_ids = torch.tensor([[ord(char) for char in text]])

>>> outputs = model(input_ids)  # forward pass
>>> pooled_output = outputs.pooler_output
>>> sequence_output = outputs.last_hidden_state
```

For batched inference and training, however, it is recommended to use the tokenizer (to pad/truncate all
sequences to the same length):

```python
>>> from transformers import CanineTokenizer, CanineModel

>>> model = CanineModel.from_pretrained("google/canine-c")
>>> tokenizer = CanineTokenizer.from_pretrained("google/canine-c")

>>> inputs = ["Life is like a box of chocolates.", "You never know what you gonna get."]
>>> encoding = tokenizer(inputs, padding="longest", truncation=True, return_tensors="pt")

>>> outputs = model(**encoding)  # forward pass
>>> pooled_output = outputs.pooler_output
>>> sequence_output = outputs.last_hidden_state
```
## CANINE specific outputs

[[autodoc]] models.canine.modeling_canine.CanineModelOutputWithPooling

## CanineConfig

[[autodoc]] CanineConfig

## CanineTokenizer

[[autodoc]] CanineTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences

## CanineModel

[[autodoc]] CanineModel
    - forward

## CanineForSequenceClassification

[[autodoc]] CanineForSequenceClassification
    - forward

## CanineForMultipleChoice

[[autodoc]] CanineForMultipleChoice
    - forward

## CanineForTokenClassification

[[autodoc]] CanineForTokenClassification
    - forward

## CanineForQuestionAnswering

[[autodoc]] CanineForQuestionAnswering
    - forward

160	docs/source/en/model_doc/clip.mdx	Normal file
@@ -0,0 +1,160 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# CLIP

## Overview

The CLIP model was proposed in [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh,
Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP
(Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be
instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing
for the task, similarly to the zero-shot capabilities of GPT-2 and GPT-3.

The abstract from the paper is the following:

*State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This
restricted form of supervision limits their generality and usability since additional labeled data is needed to specify
any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a
much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes
with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400
million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference
learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study
the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks
such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need
for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot
without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained
model weights at this https URL.*
## Usage

CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image
classification. CLIP uses a ViT-like Transformer to get visual features and a causal language model to get the text
features. Both the text and visual features are then projected to a latent space with identical dimension. The dot
product between the projected image and text features is then used as a similarity score.

To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches,
which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors
also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.
The [`CLIPFeatureExtractor`] can be used to resize (or rescale) and normalize images for the model.

The [`CLIPTokenizer`] is used to encode the text. The [`CLIPProcessor`] wraps
[`CLIPFeatureExtractor`] and [`CLIPTokenizer`] into a single instance to both
encode the text and prepare the images. The following example shows how to get the image-text similarity scores using
[`CLIPProcessor`] and [`CLIPModel`].

```python
>>> from PIL import Image
>>> import requests

>>> from transformers import CLIPProcessor, CLIPModel

>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
```
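
Since both modalities are projected to the same latent space, the text and image embeddings can also be obtained
separately via [`CLIPModel.get_text_features`] and [`CLIPModel.get_image_features`]. A minimal sketch, reusing the
`model`, `processor` and `inputs` from the snippet above (the explicit normalization mirrors what the model applies
internally before computing `logits_per_image`, up to the learned logit scale):

```python
>>> import torch

>>> with torch.no_grad():
...     image_embeds = model.get_image_features(pixel_values=inputs["pixel_values"])
...     text_embeds = model.get_text_features(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])

>>> # cosine similarity between each caption and the image (no logit scale applied here)
>>> image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
>>> text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
>>> similarity = text_embeds @ image_embeds.T  # shape (2, 1): one score per caption
```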

This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/openai/CLIP).

## CLIPConfig

[[autodoc]] CLIPConfig
    - from_text_vision_configs

## CLIPTextConfig

[[autodoc]] CLIPTextConfig

## CLIPVisionConfig

[[autodoc]] CLIPVisionConfig

## CLIPTokenizer

[[autodoc]] CLIPTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## CLIPTokenizerFast

[[autodoc]] CLIPTokenizerFast

## CLIPFeatureExtractor

[[autodoc]] CLIPFeatureExtractor

## CLIPProcessor

[[autodoc]] CLIPProcessor

## CLIPModel

[[autodoc]] CLIPModel
    - forward
    - get_text_features
    - get_image_features

## CLIPTextModel

[[autodoc]] CLIPTextModel
    - forward

## CLIPVisionModel

[[autodoc]] CLIPVisionModel
    - forward

## TFCLIPModel

[[autodoc]] TFCLIPModel
    - call
    - get_text_features
    - get_image_features

## TFCLIPTextModel

[[autodoc]] TFCLIPTextModel
    - call

## TFCLIPVisionModel

[[autodoc]] TFCLIPVisionModel
    - call

## FlaxCLIPModel

[[autodoc]] FlaxCLIPModel
    - __call__
    - get_text_features
    - get_image_features

## FlaxCLIPTextModel

[[autodoc]] FlaxCLIPTextModel
    - __call__

## FlaxCLIPVisionModel

[[autodoc]] FlaxCLIPVisionModel
    - __call__

Some files were not shown because too many files have changed in this diff.