Mirror of https://github.com/huggingface/transformers.git (synced 2025-11-02 01:54:35 +08:00)

Compare commits (692 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 64e78564a5 | |||
| fd6204b2a7 | |||
| 5c1cda9d3c | |||
| 77f4c46b50 | |||
| 6797cdc077 | |||
| f063c56d94 | |||
| 8719afa1ad | |||
| 4ce6bcc310 | |||
| f13f1f8fb8 | |||
| d9b286272c | |||
| a135f59536 | |||
| b3429ab678 | |||
| 901153c61e | |||
| 64232bc0df | |||
| 024cd19bb7 | |||
| 9120ae7d66 | |||
| dcb0e61430 | |||
| 05a930671f | |||
| ef8d32c5ea | |||
| 575c979144 | |||
| f7f872955d | |||
| 80da304a0f | |||
| 1a0b41781d | |||
| f785c51692 | |||
| 7e406f4a65 | |||
| f2ffcaf49f | |||
| ba0d50f214 | |||
| cd9b8d7efe | |||
| da37eb8e43 | |||
| 39084ca663 | |||
| e7bff0aabe | |||
| dc3f6758cf | |||
| 6f40e31766 | |||
| 33fd83bc01 | |||
| 619200cc42 | |||
| 44c5621db0 | |||
| 7eee950ac3 | |||
| cf409e5594 | |||
| f594090a93 | |||
| 079557c1c5 | |||
| c1780ce7a4 | |||
| 864c1dfe34 | |||
| 3e3e41ae20 | |||
| 8fa8e19429 | |||
| 83e59d8e0b | |||
| bf0dfa98d3 | |||
| c065025c47 | |||
| 6b241e0e3b | |||
| 0afe4a90f9 | |||
| 226e74b610 | |||
| 084a187da3 | |||
| 2ce0fb84cc | |||
| 09b0bcfea9 | |||
| c40c7e213b | |||
| 7c622482e8 | |||
| fe82b1bfa0 | |||
| f4c9a7e62e | |||
| 1e8e06862f | |||
| 87dd1a00ef | |||
| a721a5eefd | |||
| f3cf8ae7b3 | |||
| 6a11e4c2ad | |||
| 1c86157d9d | |||
| c448c01f25 | |||
| 623281aa12 | |||
| a5d2967bd8 | |||
| 980208650a | |||
| 4e7bf94e72 | |||
| 282f3ac3ef | |||
| 804c2974d5 | |||
| bc80f8bc37 | |||
| 84326a28f8 | |||
| af0692a2ca | |||
| 57c8e822f7 | |||
| 20d6931e32 | |||
| 8b945ef03e | |||
| c2cd02ac62 | |||
| 30ede8994e | |||
| db9dd09cf9 | |||
| 76116f479b | |||
| 58c789e3d2 | |||
| 022a1e9e67 | |||
| e0db8276a6 | |||
| b43e3f93ac | |||
| f37f2adb68 | |||
| 60d5bda4fd | |||
| b29eb247d3 | |||
| d6ec54ba36 | |||
| ad1f7bef13 | |||
| f748bd4242 | |||
| 3f6add8bab | |||
| c0eb218a55 | |||
| 2d27900b5d | |||
| 8d43c71a1c | |||
| 7ceff67e1a | |||
| 88ac60f7b5 | |||
| 741d48f5c7 | |||
| bc2571e61c | |||
| 0661abc545 | |||
| 1d30ec95c7 | |||
| 7959d83599 | |||
| b03b2a653d | |||
| ce11318e7e | |||
| a753cafdc0 | |||
| 6715e3b6a1 | |||
| ab2cabb964 | |||
| b24ead87e1 | |||
| e3e70f9551 | |||
| d7633a4e46 | |||
| 38a716cd41 | |||
| 4bd6b54fa4 | |||
| c1625b3261 | |||
| 4b72cfd958 | |||
| 32dbb2d954 | |||
| 04ab2ca639 | |||
| 30f065890e | |||
| 35cd8eed88 | |||
| f45cb66bf6 | |||
| 52166f672e | |||
| 9cac4fab07 | |||
| b7fc043fce | |||
| 81a6c7cd39 | |||
| 195bfd118a | |||
| 1ef152eb48 | |||
| e3ff165aa5 | |||
| ca6b80cadb | |||
| 3951fc55ee | |||
| bd41a0f74d | |||
| 1811883e80 | |||
| 5c00918681 | |||
| 50f4539b82 | |||
| bf2e0cf70b | |||
| 7bc86bea68 | |||
| 74e84f1fa6 | |||
| c3d6f33918 | |||
| a90d3f1862 | |||
| 2dc2d79ac7 | |||
| b48cf7124c | |||
| 8c9b5fcbaf | |||
| 3ed5e97ba0 | |||
| 0f3ad1507e | |||
| 2617396094 | |||
| 881945c0b5 | |||
| 5b5e4ca366 | |||
| 58d8795d74 | |||
| 880154d2e1 | |||
| 6f14eab50b | |||
| ff26f8ee3a | |||
| 5e04d70868 | |||
| 5aaf5aac0b | |||
| 6fe79e57d7 | |||
| 50595a3336 | |||
| ac588594e2 | |||
| 9f72e8f4e1 | |||
| 41f3133a3a | |||
| dabeb15292 | |||
| ca7ff64f5b | |||
| 74712e22f3 | |||
| aad95c7cde | |||
| 95dab34d55 | |||
| f1b938fda8 | |||
| cfd2eaa8cf | |||
| f464f10a2c | |||
| bfd83c17a7 | |||
| c0328a6c26 | |||
| 95037a169f | |||
| 95ffbe1686 | |||
| 3981ce3dd2 | |||
| 5a34d8d982 | |||
| d9c62047a8 | |||
| e783ea7304 | |||
| 92970c0cb9 | |||
| 5254220e7f | |||
| dfc6dd8584 | |||
| 2550b41aa2 | |||
| 6e1ee47b36 | |||
| c3fcba3219 | |||
| aaaed56ffc | |||
| 83206ca6a8 | |||
| 25e1af36e0 | |||
| 63ca402380 | |||
| 075e821d1d | |||
| 4670b57ce9 | |||
| f25444cb22 | |||
| 7fe5aaa8b0 | |||
| 9337c6c668 | |||
| 653076ca30 | |||
| 3d339ee659 | |||
| 1ad7b0398c | |||
| f38cd4373f | |||
| 9d8e8a8703 | |||
| 9fa2995993 | |||
| 3312e96bfb | |||
| edca520d0f | |||
| 893e51a53f | |||
| 81009b7a5c | |||
| 22fa0a6004 | |||
| 896d7be974 | |||
| 823df93955 | |||
| 0cd89d8c83 | |||
| 7c205bf40c | |||
| d49d3cf6d6 | |||
| f243a5ec0d | |||
| 74d7c24d8d | |||
| 38a10c6b52 | |||
| 9f1260971f | |||
| cb251ba619 | |||
| 0c6fcd3034 | |||
| ef102c4886 | |||
| 623cd6aef9 | |||
| a99f7f5c75 | |||
| 26212c14e5 | |||
| 716120cbd6 | |||
| 6f90c29eaa | |||
| 07f0bb691d | |||
| c161dd56df | |||
| fb41f9f50c | |||
| 45fc8c7951 | |||
| 6060746570 | |||
| b9b60c1630 | |||
| 8b78a32be1 | |||
| 0311ba2153 | |||
| 269c9638df | |||
| d31c7b104e | |||
| c2e0fd5283 | |||
| ba8b1f4754 | |||
| 97ccf67bb3 | |||
| 66446909b2 | |||
| 6c40e49712 | |||
| dfed4ec263 | |||
| 9c9b8e707b | |||
| ba2cf5f90d | |||
| 1ed24afe91 | |||
| 02ec02d6d3 | |||
| c6d664849b | |||
| acc851e1ff | |||
| 5bf5d50c8d | |||
| f8e90d6fb9 | |||
| ffe0761777 | |||
| 3fd7eee18f | |||
| 1c15128312 | |||
| 7442801df5 | |||
| c0d97cee13 | |||
| 02f7c2fe66 | |||
| 11505fa139 | |||
| 424419f549 | |||
| c9035e4537 | |||
| 247bed3857 | |||
| 083ad7d46c | |||
| fd338abdeb | |||
| aef4cf8c52 | |||
| 403d530eec | |||
| 520198f56f | |||
| 9853c5dd58 | |||
| 4906a29f7f | |||
| 2a8115f083 | |||
| 76800fb8e6 | |||
| b219d6b5a5 | |||
| 6c1bee7d89 | |||
| f7328de46d | |||
| 6ab7d1a429 | |||
| 2199608ca6 | |||
| 04ceee7d24 | |||
| f05a8a0c5e | |||
| 090e3e6896 | |||
| abb7430003 | |||
| b51b87c41d | |||
| e1c02e018c | |||
| 9f4e0c23d6 | |||
| 6c25f5228e | |||
| eb3479e7cf | |||
| 773e4c7263 | |||
| ef62f038fd | |||
| 6e31014110 | |||
| 3d39226a51 | |||
| b0d49fd536 | |||
| 335c0ca35c | |||
| 34e1bec649 | |||
| e8da77d181 | |||
| f4ad3d8cea | |||
| 57c1749efa | |||
| 30677dc743 | |||
| af6732225c | |||
| c301c26370 | |||
| 838f83d84c | |||
| 455f81711f | |||
| 01068abdb9 | |||
| cd56f3fe7e | |||
| b6dddda4d2 | |||
| acc3bd9d2a | |||
| d0b3797a3b | |||
| a8549bdd82 | |||
| a96edb85c9 | |||
| bf0840accc | |||
| ced7284a60 | |||
| 645f45c462 | |||
| e87505f3a1 | |||
| e031162a6b | |||
| 3e09d813aa | |||
| c32b432a67 | |||
| e3c8443f08 | |||
| 83d38c9ff3 | |||
| 7772ddb473 | |||
| 860264379f | |||
| a04eb8d369 | |||
| 8780caa388 | |||
| 604c085087 | |||
| 6dfd027279 | |||
| 700229f8a4 | |||
| 05c966f24b | |||
| fb7fca718a | |||
| 5057213bcc | |||
| ae6b6963ad | |||
| 4002f95eb6 | |||
| d7b50ce469 | |||
| 06a6fea782 | |||
| cc2366bbb9 | |||
| ddea8771c6 | |||
| b3544e4cc5 | |||
| 4f21e1ddd6 | |||
| b0595d33c1 | |||
| 3c27d246e5 | |||
| 4b2b50aa7b | |||
| 86c6f8a8b1 | |||
| 9856c9213d | |||
| e70068a719 | |||
| f183a7a3c3 | |||
| 4684bfc757 | |||
| 1a3e0c4fe6 | |||
| 5f1491d3b3 | |||
| 1c06240e1b | |||
| 3b20e910b4 | |||
| 3c12e3c1c4 | |||
| 1f5ea9e04a | |||
| f81077fcf3 | |||
| 1aed2b908e | |||
| a735f727cc | |||
| 8c297cdb30 | |||
| d4d4447d53 | |||
| 7ef40120a0 | |||
| fb2b89840b | |||
| 3f48b2bc3e | |||
| 77ffd5edd5 | |||
| bf1f43fbd7 | |||
| 2eb596f085 | |||
| eb330e8904 | |||
| e21f89f64c | |||
| b5b957a65c | |||
| 77bf3fe787 | |||
| 9f8fa4e973 | |||
| a8d4d6776d | |||
| 125ccead71 | |||
| b230181d41 | |||
| 24ab5b08a3 | |||
| 2c6684239f | |||
| 8fb4671811 | |||
| dbfe379514 | |||
| 29904a967b | |||
| 0f226f78ce | |||
| 82b8d8c7b0 | |||
| af6125ffdb | |||
| 5aaf6e1460 | |||
| be87b84276 | |||
| 68b55885ed | |||
| 21e86f99e6 | |||
| 1438c487df | |||
| b9570a813c | |||
| f2b744f690 | |||
| 946400fb68 | |||
| fd1d9f1ab8 | |||
| e8968bd03a | |||
| 117dba9948 | |||
| 427ea3fecb | |||
| 2ae678229f | |||
| 68a3215949 | |||
| 03df3fbcb4 | |||
| e84adbed40 | |||
| dcebe254fa | |||
| 008672e6e5 | |||
| 9352b5151a | |||
| 094afa515d | |||
| 4f3e93cfaf | |||
| ce9724e1bd | |||
| 5f19c07a70 | |||
| af8afdc88d | |||
| 0b98ca368f | |||
| 5c0bf39782 | |||
| 0282e24eef | |||
| 40b049c701 | |||
| 393739194e | |||
| 85a114ef47 | |||
| 3318c246f3 | |||
| cd8c93f701 | |||
| 01c7fb04be | |||
| 0486ccdd3d | |||
| d7e0d59bb7 | |||
| 8715d20c97 | |||
| f20d75a13f | |||
| c83fbc5f2d | |||
| c23248443c | |||
| 73fe40898d | |||
| 2097aa1826 | |||
| 1b5ce1e63b | |||
| c988db5af2 | |||
| 5c02b97ca2 | |||
| a0a027c2ed | |||
| 1449222217 | |||
| d3d388b934 | |||
| b5492582d0 | |||
| 5dcc08f1df | |||
| 813d730c46 | |||
| 9f8619c6aa | |||
| 87d685b8a9 | |||
| 4c379daf64 | |||
| 966ba081c9 | |||
| 58f672e65c | |||
| d41dd5359b | |||
| f5c097fc4d | |||
| d9e693e1d0 | |||
| 6bef764506 | |||
| 3f1714f8a7 | |||
| 6f840990a7 | |||
| 505494a86f | |||
| e12d6f513e | |||
| 339fc51acc | |||
| 4c41c6622c | |||
| fcf10214e0 | |||
| bd8f6cafd4 | |||
| 4c32f9f26e | |||
| fa35cda91e | |||
| 00cad2e5c1 | |||
| e8246f78f9 | |||
| 184ef8ecd0 | |||
| 543d0549f8 | |||
| ea46e3fa9c | |||
| c526bde319 | |||
| fa1a8d102f | |||
| 2f8485199c | |||
| a01ea31b5c | |||
| 9fbb4cdc80 | |||
| fda703a553 | |||
| 3ab6820370 | |||
| a637ae00c4 | |||
| 7e4428749c | |||
| 2adc8c926a | |||
| 055ed78f52 | |||
| 89693e170d | |||
| 6d9e11a193 | |||
| 602d63f05c | |||
| 63c295ac05 | |||
| 27d9e05ce2 | |||
| 053f0197b8 | |||
| 26a33cfd8c | |||
| 49c61a4ae7 | |||
| 1aa9c13f70 | |||
| 2295d783d5 | |||
| d26b37e744 | |||
| efb5c0a453 | |||
| 44f64132a5 | |||
| 6f52fce673 | |||
| 72d9e039f9 | |||
| 0d909f6bd8 | |||
| ac17f71159 | |||
| c19c811a2d | |||
| 20c10258a4 | |||
| 95ab06778c | |||
| 9a06b6b11b | |||
| b6a28e9ac9 | |||
| 546cbe7e9e | |||
| 696e8a4365 | |||
| 3ced9b3eb9 | |||
| 821d518e03 | |||
| 4196bfeda0 | |||
| a8ec52efc2 | |||
| b35e7b68ca | |||
| f284089ec4 | |||
| dfd16af832 | |||
| 917f104502 | |||
| 6f84531e61 | |||
| 5469369480 | |||
| f882966004 | |||
| b880508440 | |||
| 8fd7eb34e2 | |||
| 89b8d4f568 | |||
| 2a737bffef | |||
| d59464db6b | |||
| 3b583d02d6 | |||
| e6ce636e02 | |||
| 9dd054fba2 | |||
| f6e74a63ca | |||
| fd01104435 | |||
| 88a951e3cc | |||
| 90ecc29656 | |||
| defe9e20fe | |||
| 7da995c00c | |||
| 3e056c1003 | |||
| 9f8bc87cbe | |||
| 6b58e15507 | |||
| 395ffcd757 | |||
| 54e55b52d4 | |||
| dc9aaa3848 | |||
| 12b66215cf | |||
| 093b88f4e9 | |||
| c503a1c15e | |||
| 6290169eb3 | |||
| 805c5200dc | |||
| a5bd40b75c | |||
| 745ea78dcc | |||
| f3660613bc | |||
| 948b730f97 | |||
| b70f441b72 | |||
| d064fb5647 | |||
| 188574ac50 | |||
| 801ff969ce | |||
| 39f70a4058 | |||
| 2d2ed2cc18 | |||
| 5dc303e281 | |||
| 1750e62900 | |||
| b013842244 | |||
| 0c2325198f | |||
| 9248e27037 | |||
| a106bde5a7 | |||
| 11655fafdd | |||
| 0234de8418 | |||
| 3c733f3208 | |||
| aeba4f95bb | |||
| 256482ac92 | |||
| aca6288ff4 | |||
| f52a15897b | |||
| 311b7048c5 | |||
| ee04b69822 | |||
| a85eb616f7 | |||
| 98569d4ba2 | |||
| d03695f3a2 | |||
| 7fc686efb1 | |||
| 83d2d55c94 | |||
| 17b6e0d474 | |||
| 26f8b2cb10 | |||
| b040e6efc1 | |||
| 9d14be5c20 | |||
| 88cc26dcd1 | |||
| 63645b3b11 | |||
| cb38ffcc5e | |||
| 9dc7825744 | |||
| 894db6701e | |||
| 55fe80d084 | |||
| 22bd047e91 | |||
| 3591844306 | |||
| bdbb2c756b | |||
| 5f2a3d721c | |||
| cdcdd5f03a | |||
| 2d458b2c7d | |||
| 3437d12134 | |||
| 4a1ab7cb6c | |||
| 23e87c27be | |||
| 83f890ddd1 | |||
| 461e8cacf9 | |||
| 622a8c5995 | |||
| 94d8767ba3 | |||
| eab0afc19c | |||
| f991daed18 | |||
| 9e147d31f6 | |||
| e73a3e1891 | |||
| 19e737b93e | |||
| cd8c4c3fc2 | |||
| 88605f37a6 | |||
| cdd31b4de4 | |||
| a2e379743c | |||
| a0dfc2d30f | |||
| 9a7e63729f | |||
| f6e53e3c2b | |||
| 536aee99bb | |||
| cbadb5243c | |||
| f1299f5038 | |||
| 709c86b5a9 | |||
| 34df26ec3a | |||
| 3e116ed331 | |||
| 86caeb7636 | |||
| 3d72d47f09 | |||
| fb56bf2584 | |||
| 2fc6284f04 | |||
| d27b28d958 | |||
| 4eddc459a9 | |||
| d9a81fc0c5 | |||
| c6fe17557e | |||
| 97e688bc22 | |||
| d7f38c5d1d | |||
| 2acae50a0c | |||
| 14ed3b978e | |||
| bdf1669e3f | |||
| 5da7c78ed8 | |||
| dee876ceff | |||
| d1eb88f42d | |||
| 7246785a67 | |||
| fdb2351ebb | |||
| 83d803ba02 | |||
| 8d79e5ca49 | |||
| 4b91965731 | |||
| e94d63f6cb | |||
| 4210cd96fc | |||
| 7169d1ea7b | |||
| df1b0fb54d | |||
| 5c2d66a2f5 | |||
| 1c8c2d9ab3 | |||
| 96897a3535 | |||
| 8cbd0bd137 | |||
| 0b1f552a24 | |||
| 31b0560ab4 | |||
| 6fc940ed09 | |||
| 570218878a | |||
| 2a5c990038 | |||
| c8d3fa0dfd | |||
| 93bd2f7099 | |||
| 900daec24e | |||
| 587197dcd2 | |||
| 8fae93ca19 | |||
| 803498318c | |||
| 698c9e2dbd | |||
| c969366870 | |||
| c9837a0d27 | |||
| dd3a7f9641 | |||
| 641f418e10 | |||
| eed31db948 | |||
| 1321356bdf | |||
| f51188cbe7 | |||
| 31245775e5 | |||
| b54cb0bd82 | |||
| 6710d1d5ef | |||
| 8e13b73593 | |||
| d6b4f48ecb | |||
| 495c157d6f | |||
| 2f3b5f4dcc | |||
| 8dcfaea08d | |||
| 77b862847b | |||
| c130e67dce | |||
| 22a32cf485 | |||
| 0d8e554d42 | |||
| 937f67074d | |||
| d478257d9b | |||
| 7c07a47dfb | |||
| 1fbaa3c117 | |||
| 85395e4901 | |||
| 7c7962ba89 | |||
| 480a9d6ba0 | |||
| 0c3d23dff7 | |||
| 3e0c62b611 | |||
| 226973a9c5 | |||
| 4cda2d73ef | |||
| b82fe7d258 | |||
| e7381c4596 | |||
| 77c0ce8c0c | |||
| 78f4a0e7e5 | |||
| 63fddcf69c | |||
| c6d5e56595 | |||
| 4ed763779e | |||
| bf1a06a437 | |||
| b972125ced | |||
| ba542ffb49 | |||
| 263fac71a2 | |||
| 781220acab | |||
| 84acf0c7bb | |||
| e4bf9910dc | |||
| 0dd579c9cf | |||
| 322037e842 | |||
| f285e4c3ad | |||
| ddaafd78fb | |||
| ece6c51458 | |||
| c9df1b1d53 | |||
| 3b7e612a5e | |||
| cdd8659231 | |||
| 9e795eac88 | |||
| 31563e056d | |||
| 8bb52bd240 | |||
| b1aa4982cd | |||
| ae37ceacbd | |||
| 9a0399e18d | |||
| b01483faa0 | |||
| 45aaf5f7ab | |||
| 04fd783cc5 | |||
| d51302cca0 | |||
| 12e44af5d3 | |||
| 24db8cc329 | |||
| 769948fad2 | |||
| 8ea412a86f | |||
| 1cd16512dc | |||
| b9720dd6f2 | |||
| 89be094e29 | |||
| 4bbad604eb | |||
| ad2c431097 | |||
| 95a5f271e5 | |||
| 3be965c5db | |||
| ba607db180 |
@@ -3,7 +3,6 @@ orbs:
gcp-gke: circleci/gcp-gke@1.0.4
go: circleci/go@1.3.0

# TPU REFERENCES
references:
checkout_ml_testing: &checkout_ml_testing
@@ -69,6 +68,8 @@ jobs:
- image: circleci/python:3.6
environment:
OMP_NUM_THREADS: 1
RUN_PT_TF_CROSS_TESTS: yes
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -77,14 +78,45 @@ jobs:
keys:
- v0.4-torch_and_tf-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
- run: pip install --upgrade pip
- run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece]
- run: pip install tapas torch-scatter -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html
- run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,speech,vision]
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
- save_cache:
key: v0.4-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: RUN_PT_TF_CROSS_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf ./tests/ -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf ./tests/ -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports

run_tests_torch_and_flax:
working_directory: ~/transformers
docker:
- image: circleci/python:3.6
environment:
OMP_NUM_THREADS: 1
RUN_PT_FLAX_CROSS_TESTS: yes
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
- checkout
- restore_cache:
keys:
- v0.4-torch_and_flax-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
- run: pip install --upgrade pip
- run: pip install .[sklearn,flax,torch,testing,sentencepiece,speech,vision]
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
- save_cache:
key: v0.4-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax ./tests/ -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -96,6 +128,7 @@ jobs:
- image: circleci/python:3.7
environment:
OMP_NUM_THREADS: 1
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -104,14 +137,15 @@ jobs:
keys:
- v0.4-torch-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
- run: pip install --upgrade pip
- run: pip install .[sklearn,torch,testing,sentencepiece]
- run: pip install tapas torch-scatter -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html
- run: pip install .[sklearn,torch,testing,sentencepiece,speech,vision]
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
- save_cache:
key: v0.4-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -s --make-reports=tests_torch ./tests/ | tee tests_output.txt
- run: python -m pytest -n 3 --dist=loadfile -s --make-reports=tests_torch ./tests/ | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -123,6 +157,7 @@ jobs:
- image: circleci/python:3.7
environment:
OMP_NUM_THREADS: 1
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -149,6 +184,7 @@ jobs:
- image: circleci/python:3.7
environment:
OMP_NUM_THREADS: 1
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -158,7 +194,7 @@ jobs:
- v0.4-flax-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: pip install --upgrade pip
- run: sudo pip install .[flax,sklearn,torch,testing,sentencepiece]
- run: sudo pip install .[flax,testing,sentencepiece]
- save_cache:
key: v0.4-flax-{{ checksum "setup.py" }}
paths:
@@ -175,6 +211,8 @@ jobs:
- image: circleci/python:3.7
environment:
OMP_NUM_THREADS: 1
RUN_PIPELINE_TESTS: yes
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -183,14 +221,15 @@ jobs:
keys:
- v0.4-torch-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
- run: pip install --upgrade pip
- run: pip install .[sklearn,torch,testing,sentencepiece]
- run: pip install tapas torch-scatter -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html
- run: pip install .[sklearn,torch,testing,sentencepiece,speech,vision]
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
- save_cache:
key: v0.4-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test ./tests/ | tee tests_output.txt
- run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test ./tests/ | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -202,6 +241,8 @@ jobs:
- image: circleci/python:3.7
environment:
OMP_NUM_THREADS: 1
RUN_PIPELINE_TESTS: yes
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -216,7 +257,7 @@ jobs:
key: v0.4-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf ./tests/ -m is_pipeline_test | tee tests_output.txt
- run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf ./tests/ -m is_pipeline_test | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -228,6 +269,7 @@ jobs:
- image: circleci/python:3.7
environment:
RUN_CUSTOM_TOKENIZERS: yes
TRANSFORMERS_IS_CI: yes
steps:
- checkout
- restore_cache:
@@ -235,7 +277,7 @@ jobs:
- v0.4-custom_tokenizers-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: pip install --upgrade pip
- run: pip install .[ja,testing,sentencepiece]
- run: pip install .[ja,testing,sentencepiece,jieba]
- run: python -m unidic download
- save_cache:
key: v0.4-custom_tokenizers-{{ checksum "setup.py" }}
@@ -253,6 +295,7 @@ jobs:
- image: circleci/python:3.6
environment:
OMP_NUM_THREADS: 1
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -263,32 +306,44 @@ jobs:
- v0.4-{{ checksum "setup.py" }}
- run: pip install --upgrade pip
- run: pip install .[sklearn,torch,sentencepiece,testing]
- run: pip install -r examples/_tests_requirements.txt
- run: pip install -r examples/pytorch/_tests_requirements.txt
- save_cache:
key: v0.4-torch_examples-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -s --make-reports=examples_torch ./examples/ | tee examples_output.txt
- run: TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee examples_output.txt
- store_artifacts:
path: ~/transformers/examples_output.txt
- store_artifacts:
path: ~/transformers/reports

run_tests_git_lfs:
run_tests_hub:
working_directory: ~/transformers
docker:
- image: circleci/python:3.7
environment:
HUGGINGFACE_CO_STAGING: yes
RUN_GIT_LFS_TESTS: yes
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
- checkout
- restore_cache:
keys:
- v0.4-hub-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: sudo apt-get install git-lfs
- run: |
git config --global user.email "ci@dummy.com"
git config --global user.name "ci"
- run: pip install --upgrade pip
- run: pip install .[testing]
- run: RUN_GIT_LFS_TESTS=1 python -m pytest -sv ./tests/test_hf_api.py -k "HfLargefilesTest"
- run: pip install .[torch,sentencepiece,testing]
- save_cache:
key: v0.4-hub-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -sv ./tests/ -m is_staging_test

build_doc:
working_directory: ~/transformers
@@ -300,13 +355,14 @@ jobs:
keys:
- v0.4-build_doc-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
- run: pip install --upgrade pip
- run: pip install ."[all, docs]"
- run: pip install ."[docs]"
- save_cache:
key: v0.4-build_doc-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: cd docs && make html SPHINXOPTS="-W"
- run: cd docs && make html SPHINXOPTS="-W -j 4"
- store_artifacts:
path: ./docs/_build

@@ -323,7 +379,7 @@ jobs:
keys:
- v0.4-deploy_doc-{{ checksum "setup.py" }}
- v0.4-{{ checksum "setup.py" }}
- run: pip install ."[all,docs]"
- run: pip install ."[docs]"
- save_cache:
key: v0.4-deploy_doc-{{ checksum "setup.py" }}
paths:
@@ -335,6 +391,8 @@ jobs:
docker:
- image: circleci/python:3.6
resource_class: medium
environment:
TRANSFORMERS_IS_CI: yes
parallelism: 1
steps:
- checkout
@@ -351,12 +409,14 @@ jobs:
- '~/.cache/pip'
- run: black --check examples tests src utils
- run: isort --check-only examples tests src utils
- run: python utils/custom_init_isort.py --check_only
- run: flake8 examples tests src utils
- run: python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
- run: python utils/check_copies.py
- run: python utils/check_table.py
- run: python utils/check_dummies.py
- run: python utils/check_repo.py
- run: python utils/check_inits.py

check_repository_consistency:
working_directory: ~/transformers
@@ -375,6 +435,7 @@ jobs:
- image: circleci/python:3.6
environment:
OMP_NUM_THREADS: 1
TRANSFORMERS_IS_CI: yes
resource_class: xlarge
parallelism: 1
steps:
@@ -413,12 +474,13 @@ workflows:
- run_examples_torch
- run_tests_custom_tokenizers
- run_tests_torch_and_tf
- run_tests_torch_and_flax
- run_tests_torch
- run_tests_tf
- run_tests_flax
- run_tests_pipelines_torch
- run_tests_pipelines_tf
- run_tests_git_lfs
- run_tests_hub
- build_doc
- deploy_doc: *workflow_filters
# tpu_testing_jobs:

@@ -3,6 +3,7 @@ cd docs
function deploy_doc(){
echo "Creating doc at commit $1 and pushing to folder $2"
git checkout $1
pip install -U ..
if [ ! -z "$2" ]
then
if [ "$2" == "master" ]; then
@@ -45,7 +46,7 @@ deploy_doc "6f5a12a" v2.7.0
deploy_doc "11c3257" v2.8.0
deploy_doc "e7cfc1a" v2.9.0
deploy_doc "7cb203f" v2.9.1
deploy_doc "10d7239" v2.10.0
deploy_doc "10d7239" v2.10.0
deploy_doc "b42586e" v2.11.0
deploy_doc "7fb8bdf" v3.0.2
deploy_doc "4b3ee9c" v3.1.0
@@ -53,6 +54,12 @@ deploy_doc "3ebb1b3" v3.2.0
deploy_doc "0613f05" v3.3.1
deploy_doc "eb0e0ce" v3.4.0
deploy_doc "818878d" v3.5.1
deploy_doc "c781171" v4.0.0
deploy_doc "c781171" v4.0.1
deploy_doc "bfa4ccf" v4.1.1
deploy_doc "7d9a9d0" # v4.2.0 Latest stable release
deploy_doc "7d9a9d0" v4.2.2
deploy_doc "bae0c79" v4.3.3
deploy_doc "c988db5" v4.4.0
deploy_doc "c5d6a28" v4.4.1
deploy_doc "6bc89ed" v4.4.2
deploy_doc "4906a29" v4.5.0
deploy_doc "4bae96e" # v4.5.1 Latest stable release
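For reference, `deploy_doc` takes a commit hash and an optional destination folder, as the calls above show. A minimal usage sketch, assuming it is invoked from within deploy.sh where the function is defined (the hashes are taken from the lines above):

$ deploy_doc "bae0c79" v4.3.3   # check out bae0c79, build the docs, push them to the v4.3.3 folder
$ deploy_doc "4bae96e"          # folder argument omitted, as done for the latest stable release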
3 .gitattributes vendored Normal file
@@ -0,0 +1,3 @@
*.py eol=lf
*.rst eol=lf
*.md eol=lf
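The new `.gitattributes` forces LF line endings for Python, reST, and Markdown sources regardless of the contributor's platform. A quick way to confirm the attribute is picked up, as a minimal sketch (the file names are just examples of files matching the patterns, not files touched by this diff):

$ git check-attr eol -- setup.py README.md
setup.py: eol: lf
README.md: eol: lf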
10 .github/ISSUE_TEMPLATE/bug-report.md vendored
@@ -34,7 +34,7 @@ Models:
- funnel: @sgugger
- gpt2: @patrickvonplaten, @LysandreJik
- rag: @patrickvonplaten, @lhoestq
- tensorflow: @jplu
- tensorflow: @Rocketknight1

Library:

@@ -42,15 +42,19 @@ Library:
- deepspeed: @stas00
- ray/raytune: @richardliaw, @amogkam
- text generation: @patrickvonplaten
- tokenizers: @n1t0, @LysandreJik
- tokenizers: @LysandreJik
- trainer: @sgugger
- pipelines: @LysandreJik

Documentation: @sgugger

Model hub:

- for issues with a model report at https://discuss.huggingface.co/ and tag the model's creator.

HF projects:

- nlp datasets: [different repo](https://github.com/huggingface/nlp)
- datasets: [different repo](https://github.com/huggingface/datasets)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)

Examples:

6 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -30,7 +30,7 @@ Fixes # (issue)
## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors which may be interested in your PR.
members/contributors who may be interested in your PR.

<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @

@@ -46,7 +46,7 @@ Models:
- funnel: @sgugger
- gpt2: @patrickvonplaten, @LysandreJik
- rag: @patrickvonplaten, @lhoestq
- tensorflow: @jplu
- tensorflow: @LysandreJik

Library:

@@ -62,7 +62,7 @@ Documentation: @sgugger

HF projects:

- nlp datasets: [different repo](https://github.com/huggingface/nlp)
- datasets: [different repo](https://github.com/huggingface/datasets)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)

Examples:

12 .github/conda/meta.yaml vendored
@@ -14,8 +14,10 @@ requirements:
host:
- python
- pip
- numpy
- numpy >=1.17
- dataclasses
- importlib_metadata
- huggingface_hub
- packaging
- filelock
- requests
@@ -23,11 +25,13 @@ requirements:
- sacremoses
- regex !=2019.12.17
- protobuf
- tokenizers ==0.9.4
- tokenizers >=0.10.1,<0.11.0
run:
- python
- numpy
- numpy >=1.17
- dataclasses
- importlib_metadata
- huggingface_hub
- packaging
- filelock
- requests
@@ -35,7 +39,7 @@ requirements:
- sacremoses
- regex !=2019.12.17
- protobuf
- tokenizers ==0.9.4
- tokenizers >=0.10.1,<0.11.0

test:
imports:

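The recipe change pins `tokenizers` to the 0.10.x series and raises the `numpy` floor to 1.17 in both the host and run requirements. A minimal sketch of how a recipe like this is typically built and consumed; the channels and recipe path are assumptions based on the paths in this diff, not commands copied from the release workflow:

$ conda build .github/conda -c huggingface -c conda-forge   # requires the conda-build package
$ conda install -c huggingface transformers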
18 .github/stale.yml vendored
@@ -1,18 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 60
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
- Feature request
# Label to use when marking an issue as stale
staleLabel: wontfix
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false
9 .github/workflows/model-templates.yml vendored
@@ -2,14 +2,15 @@ name: Model templates runner

on:
push:
branches:
- master
pull_request:
paths:
- "src/**"
- "tests/**"
- ".github/**"
- "templates/**"
pull_request_target:
branches:
- master
types: [assigned, opened, synchronize, reopened]

jobs:
run_tests_templates:
@@ -36,6 +37,7 @@ jobs:
- name: Install dependencies
run: |
pip install --upgrade pip
sudo apt -y update && sudo apt install -y libsndfile1-dev
pip install .[dev]
- name: Create model files
run: |
@@ -48,6 +50,7 @@ jobs:
make style
python utils/check_table.py --fix_and_overwrite
python utils/check_dummies.py --fix_and_overwrite
python utils/check_copies.py --fix_and_overwrite

- name: Run all non-slow tests
run: |

1 .github/workflows/release-conda.yml vendored
@@ -24,6 +24,7 @@ jobs:
with:
auto-update-conda: true
auto-activate-base: false
python-version: 3.8
activate-environment: "build-transformers"
channels: huggingface

326 .github/workflows/self-push.yml vendored
@ -5,148 +5,97 @@ on:
|
||||
branches:
|
||||
- master
|
||||
- ci_*
|
||||
- ci-*
|
||||
paths:
|
||||
- "src/**"
|
||||
- "tests/**"
|
||||
- ".github/**"
|
||||
- "templates/**"
|
||||
# pull_request:
|
||||
repository_dispatch:
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
|
||||
jobs:
|
||||
run_tests_torch_gpu:
|
||||
runs-on: [self-hosted, gpu, single-gpu]
|
||||
runs-on: [self-hosted, docker-gpu, single-gpu]
|
||||
container:
|
||||
image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Python version
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
|
||||
- name: Current dir
|
||||
run: pwd
|
||||
- run: nvidia-smi
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: .env
|
||||
key: v1.1-tests_torch_gpu-${{ hashFiles('setup.py') }}
|
||||
|
||||
- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
|
||||
run: |
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
apt -y update && apt install -y libsndfile1-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
|
||||
pip install git+https://github.com/huggingface/datasets
|
||||
pip install pandas torch-scatter -f https://pytorch-geometric.com/whl/torch-1.7.0+cu102.html
|
||||
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
# - name: Create model files
|
||||
# run: |
|
||||
# source .env/bin/activate
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json --path=templates/adding_a_new_model
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/standalone.json --path=templates/adding_a_new_model
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/tf-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
|
||||
|
||||
- name: Run all non-slow tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_torch_gpu tests
|
||||
python -m pytest -n 2 --dist=loadfile --make-reports=tests_torch_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_gpu_failures_short.txt
|
||||
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: run_all_tests_torch_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
run_tests_tf_gpu:
|
||||
runs-on: [self-hosted, gpu, single-gpu]
|
||||
runs-on: [self-hosted, docker-gpu, single-gpu]
|
||||
timeout-minutes: 120
|
||||
container:
|
||||
image: tensorflow/tensorflow:2.4.1-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Python version
|
||||
run: |
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
- name: Current dir
|
||||
run: pwd
|
||||
- run: nvidia-smi
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: .env
|
||||
key: v1.1-tests_tf_gpu-${{ hashFiles('setup.py') }}
|
||||
|
||||
- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
|
||||
pip install git+https://github.com/huggingface/datasets
|
||||
pip install .[sklearn,testing,onnxruntime,sentencepiece]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
|
||||
|
||||
- name: Create model files
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json --path=templates/adding_a_new_model
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/standalone.json --path=templates/adding_a_new_model
|
||||
# transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/tf-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
|
||||
|
||||
- name: Run all non-slow tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0
|
||||
TF_NUM_INTRAOP_THREADS: 8
|
||||
TF_NUM_INTEROP_THREADS: 1
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_tf_gpu tests
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_tf_gpu_failures_short.txt
|
||||
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
@ -154,58 +103,42 @@ jobs:
|
||||
name: run_all_tests_tf_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
run_tests_torch_multi_gpu:
|
||||
runs-on: [self-hosted, gpu, multi-gpu]
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
container:
|
||||
image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Python version
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
nvidia-smi
|
||||
|
||||
- name: Current dir
|
||||
run: pwd
|
||||
- run: nvidia-smi
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: .env
|
||||
key: v1.1-tests_torch_multi_gpu-${{ hashFiles('setup.py') }}
|
||||
|
||||
- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
|
||||
run: |
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
apt -y update && apt install -y libsndfile1-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
|
||||
pip install git+https://github.com/huggingface/datasets
|
||||
pip install pandas torch-scatter -f https://pytorch-geometric.com/whl/torch-1.7.0+cu102.html
|
||||
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
- name: Run all non-slow tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
MKL_SERVICE_FORCE_INTEL: 1
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_torch_multi_gpu tests
|
||||
python -m pytest -n 2 --dist=loadfile --make-reports=tests_torch_multi_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_multi_gpu_failures_short.txt
|
||||
run: cat reports/tests_torch_multi_gpu_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
@ -215,52 +148,35 @@ jobs:
|
||||
path: reports
|
||||
|
||||
run_tests_tf_multi_gpu:
|
||||
runs-on: [self-hosted, gpu, multi-gpu]
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
timeout-minutes: 120
|
||||
container:
|
||||
image: tensorflow/tensorflow:2.4.1-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Python version
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
nvidia-smi
|
||||
|
||||
- name: Current dir
|
||||
run: pwd
|
||||
- run: nvidia-smi
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: .env
|
||||
key: v1.1-tests_tf_multi_gpu-${{ hashFiles('setup.py') }}
|
||||
|
||||
- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
|
||||
run: |
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
|
||||
pip install git+https://github.com/huggingface/datasets
|
||||
pip install .[sklearn,testing,onnxruntime,sentencepiece]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
|
||||
|
||||
- name: Run all non-slow tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
TF_NUM_INTRAOP_THREADS: 8
|
||||
TF_NUM_INTEROP_THREADS: 1
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_tf_multi_gpu tests
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_multi_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
@ -272,4 +188,112 @@ jobs:
|
||||
with:
|
||||
name: run_all_tests_tf_multi_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
run_tests_torch_cuda_extensions_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, single-gpu]
|
||||
container:
|
||||
image: nvcr.io/nvidia/pytorch:21.03-py3
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt -y update && apt install -y libaio-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[testing,deepspeed]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
- name: Run all tests on GPU
|
||||
run: |
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: run_tests_torch_cuda_extensions_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
run_tests_torch_cuda_extensions_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
container:
|
||||
image: nvcr.io/nvidia/pytorch:21.03-py3
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt -y update && apt install -y libaio-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[testing,deepspeed,fairscale]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
- name: Run all tests on GPU
|
||||
run: |
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_multi_gpu tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
send_results:
|
||||
name: Send results to webhook
|
||||
runs-on: ubuntu-latest
|
||||
if: always()
|
||||
needs: [
|
||||
run_tests_torch_gpu,
|
||||
run_tests_tf_gpu,
|
||||
run_tests_torch_multi_gpu,
|
||||
run_tests_tf_multi_gpu,
|
||||
run_tests_torch_cuda_extensions_gpu,
|
||||
run_tests_torch_cuda_extensions_multi_gpu
|
||||
]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: actions/download-artifact@v2
|
||||
|
||||
- name: Send message to Slack
|
||||
env:
|
||||
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
|
||||
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
|
||||
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
python utils/notification_service.py push
|
||||
|
||||
386 .github/workflows/self-scheduled.yml vendored
@ -1,82 +1,66 @@
|
||||
# configuration notes:
|
||||
#
|
||||
# - `source .env/bin/activate` is currently needed to be run first thing first in each step. Otherwise
|
||||
# the step uses the system-wide python interpreter.
|
||||
|
||||
name: Self-hosted runner (scheduled)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- multi_ci_*
|
||||
repository_dispatch:
|
||||
schedule:
|
||||
- cron: "0 0 * * *"
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
RUN_SLOW: yes
|
||||
OMP_NUM_THREADS: 16
|
||||
MKL_NUM_THREADS: 16
|
||||
|
||||
jobs:
|
||||
run_all_tests_torch_gpu:
|
||||
runs-on: [self-hosted, gpu, single-gpu]
|
||||
runs-on: [self-hosted, docker-gpu, single-gpu]
|
||||
container:
|
||||
image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: .env
|
||||
key: v 1.1-slow_tests_torch_gpu-${{ hashFiles('setup.py') }}
|
||||
|
||||
- name: Python version
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
|
||||
- name: Current dir
|
||||
run: pwd
|
||||
- run: nvidia-smi
|
||||
|
||||
- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
apt -y update && apt install -y libsndfile1-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
|
||||
pip install git+https://github.com/huggingface/datasets
|
||||
pip list
|
||||
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
- name: Run all tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
RUN_SLOW: yes
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_gpu tests
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_gpu_failures_short.txt
|
||||
|
||||
|
||||
- name: Run examples tests on GPU
|
||||
if: ${{ always() }}
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
OMP_NUM_THREADS: 16
|
||||
MKL_NUM_THREADS: 16
|
||||
RUN_SLOW: yes
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
pip install -r examples/_tests_requirements.txt
|
||||
python -m pytest -n 1 --dist=loadfile -s --make-reports=examples_torch_gpu examples
|
||||
pip install -r examples/pytorch/_tests_requirements.txt
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=examples_torch_gpu examples
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
@ -85,13 +69,9 @@ jobs:
|
||||
- name: Run all pipeline tests on GPU
|
||||
if: ${{ always() }}
|
||||
env:
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: "true"
|
||||
OMP_NUM_THREADS: 1
|
||||
RUN_SLOW: yes
|
||||
RUN_PIPELINE_TESTS: yes
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_torch_pipeline_gpu tests
|
||||
python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_torch_pipeline_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
@ -104,60 +84,36 @@ jobs:
|
||||
name: run_all_tests_torch_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
run_all_tests_tf_gpu:
|
||||
runs-on: [self-hosted, gpu, single-gpu]
|
||||
runs-on: [self-hosted, docker-gpu, single-gpu]
|
||||
container:
|
||||
image: tensorflow/tensorflow:2.4.1-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Loading cache.
|
||||
uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: .env
|
||||
key: v1.1-slow_tests_tf_gpu-${{ hashFiles('setup.py') }}
|
||||
|
||||
- name: Python version
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
|
||||
- name: Current dir
|
||||
run: pwd
|
||||
- run: nvidia-smi
|
||||
|
||||
- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
which python
|
||||
python --version
|
||||
pip --version
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
|
||||
pip install git+https://github.com/huggingface/datasets
|
||||
pip list
|
||||
pip install .[sklearn,testing,onnx,sentencepiece]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
|
||||
|
||||
- name: Run all tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
RUN_SLOW: yes
|
||||
TF_NUM_INTEROP_THREADS: 1
|
||||
TF_NUM_INTRAOP_THREADS: 16
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_tf_gpu tests
|
||||
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_tf_gpu_failures_short.txt
|
||||
@ -165,17 +121,15 @@ jobs:
|
||||
- name: Run all pipeline tests on GPU
|
||||
if: ${{ always() }}
|
||||
env:
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: "true"
|
||||
OMP_NUM_THREADS: 1
|
||||
RUN_SLOW: yes
|
||||
RUN_PIPELINE_TESTS: yes
|
||||
TF_NUM_INTEROP_THREADS: 1
|
||||
TF_NUM_INTRAOP_THREADS: 16
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_tf_pipelines_gpu tests
|
||||
python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_tf_pipeline_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_tf_pipelines_gpu_failures_short.txt
|
||||
run: cat reports/tests_tf_pipeline_gpu_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
@ -183,86 +137,49 @@ jobs:
|
||||
with:
|
||||
name: run_all_tests_tf_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||

run_all_tests_torch_multi_gpu:
runs-on: [self-hosted, gpu, multi-gpu]
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- uses: actions/checkout@v2
- name: Launcher docker
uses: actions/checkout@v2

- name: Loading cache.
uses: actions/cache@v2
id: cache
with:
path: .env
key: v1.1-slow_tests_torch_multi_gpu-${{ hashFiles('setup.py') }}

- name: Python version
- name: NVIDIA-SMI
run: |
which python
python --version
pip --version

- name: Current dir
run: pwd
- run: nvidia-smi

- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
if: steps.cache.outputs.cache-hit != 'true'
run: |
python -m venv .env
source .env/bin/activate
which python
python --version
pip --version
nvidia-smi

- name: Install dependencies
run: |
source .env/bin/activate
apt -y update && apt install -y libsndfile1-dev
pip install --upgrade pip
pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
pip install git+https://github.com/huggingface/datasets
pip list
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]

- name: Are GPUs recognized by our DL frameworks
run: |
source .env/bin/activate
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
python -c "import torch; print('Cuda version:', torch.version.cuda)"
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"

- name: Run all tests on multi-GPU
- name: Run all tests on GPU
env:
OMP_NUM_THREADS: 1
RUN_SLOW: yes
MKL_SERVICE_FORCE_INTEL: 1
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_multi_gpu tests
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_multi_gpu_failures_short.txt

- name: Run examples tests on multi-GPU
env:
OMP_NUM_THREADS: 1
RUN_SLOW: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_examples_multi_gpu examples

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_examples_multi_gpu_failures_short.txt

- name: Run all pipeline tests on multi-GPU
- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
TF_FORCE_GPU_ALLOW_GROWTH: "true"
OMP_NUM_THREADS: 1
RUN_SLOW: yes
RUN_PIPELINE_TESTS: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_torch_pipeline_multi_gpu tests
python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_torch_pipeline_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}

@@ -276,73 +193,48 @@ jobs:

path: reports

run_all_tests_tf_multi_gpu:
runs-on: [self-hosted, gpu, multi-gpu]
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
image: tensorflow/tensorflow:2.4.1-gpu
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- uses: actions/checkout@v2
- name: Launcher docker
uses: actions/checkout@v2

- name: Loading cache.
uses: actions/cache@v2
id: cache
with:
path: .env
key: v1.1-slow_tests_tf_multi_gpu-${{ hashFiles('setup.py') }}

- name: Python version
- name: NVIDIA-SMI
run: |
which python
python --version
pip --version

- name: Current dir
run: pwd
- run: nvidia-smi

- name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
if: steps.cache.outputs.cache-hit != 'true'
run: |
python -m venv .env
source .env/bin/activate
which python
python --version
pip --version
nvidia-smi

- name: Install dependencies
run: |
source .env/bin/activate
pip install --upgrade pip
pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
pip install git+https://github.com/huggingface/datasets
pip list
pip install .[sklearn,testing,onnx,sentencepiece]

- name: Are GPUs recognized by our DL frameworks
run: |
source .env/bin/activate
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

- name: Run all tests on multi-GPU
- name: Run all tests on GPU
env:
OMP_NUM_THREADS: 1
RUN_SLOW: yes
TF_NUM_INTEROP_THREADS: 1
TF_NUM_INTRAOP_THREADS: 16
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_tf_multi_gpu tests
python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_multi_gpu_failures_short.txt

- name: Run all pipeline tests on multi-GPU
- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
TF_FORCE_GPU_ALLOW_GROWTH: "true"
OMP_NUM_THREADS: 1
RUN_SLOW: yes
RUN_PIPELINE_TESTS: yes
TF_NUM_INTEROP_THREADS: 1
TF_NUM_INTRAOP_THREADS: 16
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_tf_pipeline_multi_gpu tests
python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_tf_pipeline_multi_gpu tests

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_pipeline_multi_gpu_failures_short.txt

@@ -353,4 +245,112 @@ jobs:

with:
name: run_all_tests_tf_multi_gpu_test_reports
path: reports
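
The `RUN_SLOW` and `RUN_PIPELINE_TESTS` variables set above are what the library's test decorators read to decide whether slow or pipeline tests get skipped. As a rough illustration of the pattern (not the actual `transformers.testing_utils` code), such an environment-gated decorator can look like this:

```python
# simplified sketch of an env-var-gated test decorator; names are illustrative
import os
import unittest


def _env_flag(name, default=False):
    # treat "yes"/"true"/"1" as enabled, matching the `RUN_SLOW: yes` style used in the workflow
    value = os.environ.get(name, "")
    return value.lower() in ("yes", "true", "1") if value else default


def slow(test_case):
    # the decorated test only runs when RUN_SLOW is set, as it is for these scheduled jobs
    return unittest.skipUnless(_env_flag("RUN_SLOW"), "test is slow; set RUN_SLOW=yes to run it")(test_case)
```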

run_all_tests_torch_cuda_extensions_gpu:
runs-on: [self-hosted, docker-gpu, single-gpu]
container:
image: nvcr.io/nvidia/pytorch:21.03-py3
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: NVIDIA-SMI
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libaio-dev
pip install --upgrade pip
pip install .[testing,deepspeed]

- name: Are GPUs recognized by our DL frameworks
run: |
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
python -c "import torch; print('Cuda version:', torch.version.cuda)"
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"

- name: Run all tests on GPU
run: |
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_gpu_test_reports
path: reports

run_all_tests_torch_cuda_extensions_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
image: nvcr.io/nvidia/pytorch:21.03-py3
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
steps:
- name: Launcher docker
uses: actions/checkout@v2

- name: NVIDIA-SMI
run: |
nvidia-smi

- name: Install dependencies
run: |
apt -y update && apt install -y libaio-dev
pip install --upgrade pip
pip install .[testing,deepspeed,fairscale]

- name: Are GPUs recognized by our DL frameworks
run: |
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
python -c "import torch; print('Cuda version:', torch.version.cuda)"
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"

- name: Run all tests on GPU
run: |
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_multi_gpu tests/deepspeed tests/extended

- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
path: reports

send_results:
name: Send results to webhook
runs-on: ubuntu-latest
if: always()
needs: [
run_all_tests_torch_gpu,
run_all_tests_tf_gpu,
run_all_tests_torch_multi_gpu,
run_all_tests_tf_multi_gpu,
run_all_tests_torch_cuda_extensions_gpu,
run_all_tests_torch_cuda_extensions_multi_gpu
]
steps:
- uses: actions/checkout@v2

- uses: actions/download-artifact@v2

- name: Send message to Slack
env:
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}

run: |
pip install slack_sdk
python utils/notification_service.py scheduled
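
The final `send_results` job installs `slack_sdk` and runs `utils/notification_service.py`, which aggregates the downloaded report artifacts and posts a summary to Slack. A heavily simplified sketch of that idea (the real script formats the per-job reports; the env var names below just mirror the ones configured above):

```python
# minimal illustration of a Slack notification step; not the actual
# utils/notification_service.py, which also parses the downloaded report artifacts
import os

from slack_sdk import WebClient

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
client.chat_postMessage(
    channel=os.environ["CI_SLACK_CHANNEL_ID"],
    text="Scheduled CI finished - see the uploaded test reports for details.",
)
```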

27  .github/workflows/stale.yml  vendored  Normal file

@@ -0,0 +1,27 @@

name: Stale Bot

on:
schedule:
- cron: "0 15 * * *"

jobs:
close_stale_issues:
name: Close Stale Issues
if: github.repository == 'huggingface/transformers'
runs-on: ubuntu-latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v2

- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.7

- name: Install requirements
run: |
pip install PyGithub
- name: Close stale issues
run: |
python scripts/stale.py
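
`scripts/stale.py` relies on PyGithub (installed just above) to find and close issues with no recent activity. A bare-bones sketch of that pattern; the actual script applies its own label and activity rules, and the 30-day threshold below is assumed purely for illustration:

```python
# illustrative stale-issue sweep; the repository's scripts/stale.py is more selective
import os
from datetime import datetime, timedelta

from github import Github

repo = Github(os.environ["GITHUB_TOKEN"]).get_repo("huggingface/transformers")
cutoff = datetime.utcnow() - timedelta(days=30)  # assumed threshold, for illustration only

for issue in repo.get_issues(state="open"):
    # skip pull requests and anything updated recently
    if issue.pull_request is None and issue.updated_at < cutoff:
        issue.create_comment("This issue has been automatically marked as stale.")
        issue.edit(state="closed")
```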

3  .gitignore  vendored

@@ -9,8 +9,7 @@ __pycache__/

*.so

# tests and logs
tests/fixtures/*
!tests/fixtures/sample_text_no_unicode.txt
tests/fixtures/cached_*_text.txt
logs/
lightning_logs/
lang_code_data/

@@ -36,6 +36,13 @@ There are 4 ways you can contribute to transformers:

* Contributing to the examples or to the documentation;
* Submitting issues related to bugs or desired new features.

In particular, there is a special [Good First
Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of
open Issues that are open to anybody to work on. Just comment in the issue that you'd like to work
on it. In that same listing you will also find some Issues with a `Good Second Issue` label. These are
typically slightly more complicated than the Issues with just a `Good First Issue` label. But if you
feel you know what you're doing, go for it.

*All are equally valuable to the community.*

## Submitting a new issue or feature request

@@ -46,7 +53,7 @@ feedback.

### Did you find a bug?

The transformers are robust and reliable thanks to the users who notify us of
The 🤗 Transformers library is robust and reliable thanks to the users who notify us of
the problems they encounter. So thank you for reporting an issue.

First, we would really appreciate it if you could **make sure the bug was not

@@ -285,7 +292,7 @@ $ python -m pytest -n auto --dist=loadfile -s -v ./tests/

and for the examples:

```bash
$ pip install -r examples/requirements.txt # only needed the first time
$ pip install -r examples/xxx/requirements.txt # only needed the first time
$ python -m pytest -n auto --dist=loadfile -s -v ./examples/
```
In fact, that's how `make test` and `make test-examples` are implemented (sans the `pip install` line)!

@@ -343,7 +350,7 @@ You can now use `make` from any terminal (Powershell, cmd.exe, etc) 🎉

### Syncing forked master with upstream (HuggingFace) master

To avoid pinging the upstream repository, which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs,
when syncing the master branch of a forked repository, please follow these steps:
1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked master.
2. If a PR is absolutely necessary, use the following steps after checking out your branch:

@@ -207,6 +207,8 @@ You are not required to read the following guidelines before opening an issue. H

Do not despair if you can't figure it out from the beginning; just share what you can and perhaps someone else will be able to help you at the forums.

If your setup involves any custom datasets, the best way to help us reproduce the problem is to create a [Google Colab notebook](https://colab.research.google.com/) that demonstrates the issue, and once you verify that the issue still exists, include a link to that notebook in the Issue. Just make sure that you don't copy and paste the location bar url of the open notebook - as this is private and we won't be able to open it. Instead, you need to click on `Share` in the right upper corner of the notebook, select `Get Link` and then copy and paste the public link it will give to you.

7. If you forked off some of this project's code or example applications, please, do not ask us to go into your code repository and figure out what you may have done. The code is already very complex and unless there is an easy way to do a diff and it's a small diff, it won't be possible to find someone with time on their hands to make a lengthy investigation. Albeit, you might find someone at the forums who will be generous to do this for you.

8. Before reporting an issue, first, always try to update your environment to the latest official version of this library. We have no resources to go and debug older revisions, which could easily have bugs that have been fixed in the latest released version.

47  Makefile

@@ -1,5 +1,7 @@

.PHONY: deps_table_update modified_only_fixup extra_quality_checks quality style fixup fix-copies test test-examples docs

# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src

check_dirs := examples tests src utils

@@ -19,33 +21,44 @@ modified_only_fixup:

deps_table_update:
@python setup.py deps_table_update

# autogenerating code

autogenerate_code: deps_table_update
python utils/class_mapping_update.py

# Check that source code meets quality standards

extra_quality_checks: deps_table_update
extra_quality_checks:
python utils/check_copies.py
python utils/check_table.py
python utils/check_dummies.py
python utils/check_repo.py
python utils/style_doc.py src/transformers docs/source --max_len 119
python utils/check_inits.py

# this target runs checks on all files
quality:
black --check $(check_dirs)
isort --check-only $(check_dirs)
python utils/custom_init_isort.py --check_only
flake8 $(check_dirs)
python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
${MAKE} extra_quality_checks

# Format source code automatically and check if there are any problems left that need manual fixing

style: deps_table_update
extra_style_checks:
python utils/custom_init_isort.py
python utils/style_doc.py src/transformers docs/source --max_len 119

# this target runs checks on all files and potentially modifies some of them
style:
black $(check_dirs)
isort $(check_dirs)
python utils/style_doc.py src/transformers docs/source --max_len 119
${MAKE} autogenerate_code
${MAKE} extra_style_checks

# Super fast fix and check target that only works on relevant modified files since the branch was made

fixup: modified_only_fixup extra_quality_checks
fixup: modified_only_fixup extra_style_checks autogenerate_code extra_quality_checks

# Make marked copies of snippets of codes conform to the original

@@ -62,9 +75,29 @@ test:

# Run tests for examples

test-examples:
python -m pytest -n auto --dist=loadfile -s -v ./examples/
python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/

# Run tests for SageMaker DLC release

test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker]
TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker

# Check that docs can build

docs:
cd docs && make html SPHINXOPTS="-W -j 4"

# Release stuff

pre-release:
python utils/release.py

pre-patch:
python utils/release.py --patch

post-release:
python utils/release.py --post_release

post-patch:
python utils/release.py --post_release --patch

78  README.md

@@ -38,24 +38,24 @@ limitations under the License.

</p>

<h3 align="center">
<p>State-of-the-art Natural Language Processing for PyTorch and TensorFlow 2.0
<p>State-of-the-art Natural Language Processing for Jax, PyTorch and TensorFlow
</h3>

🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation, etc in 100+ languages. Its aim is to make cutting-edge NLP easier to use for everyone.
🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone.

🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture can be used as a standalone and modified to enable quick research experiments.
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments.

🤗 Transformers is backed by the two most popular deep learning libraries, [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/), with a seamless integration between them, allowing you to train your models with one then load it for inference with the other.
🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.

## Online demos

You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) to use those models.
You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models.

Here are a few examples:
- [Masked word completion with BERT](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
- [Named Entity Recognition with Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
- [Text generation with GPT-2](https://huggingface.co/gpt2?text=A+long+time+ago%2C+)
- [Natural Langugage Inference with RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [Natural Language Inference with RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [Summarization with BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
- [Question answering with DistilBERT](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
- [Translation with T5](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)

@@ -64,20 +64,20 @@ Here are a few examples:

## Quick tour

To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model training. Here is how to quickly use a pipeline to classify positive versus negative texts
To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. Here is how to quickly use a pipeline to classify positive versus negative texts:

```python
>>> from transformers import pipeline

# Allocate a pipeline for sentiment-analysis
>>> classifier = pipeline('sentiment-analysis')
>>> classifier('We are very happy to include pipeline into the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9978193640708923}]
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
```

The second line of code downloads and caches the pretrained model used by the pipeline, the third line evaluates it on the given text. Here the answer is "positive" with a confidence of 99.8%.
The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here the answer is "positive" with a confidence of 99.97%.

This is another example of pipeline used for that can extract question answers from some context:
Many NLP tasks have a pre-trained `pipeline` ready to go. For example, we can easily extract question answers given context:

``` python
>>> from transformers import pipeline

@@ -86,15 +86,15 @@ This is another example of pipeline used for that can extract question answers f

>>> question_answerer = pipeline('question-answering')
>>> question_answerer({
... 'question': 'What is the name of the repository ?',
... 'context': 'Pipeline have been included in the huggingface/transformers repository'
... 'context': 'Pipeline has been included in the huggingface/transformers repository'
... })
{'score': 0.5135612454720828, 'start': 35, 'end': 59, 'answer': 'huggingface/transformers'}
{'score': 0.30970096588134766, 'start': 34, 'end': 58, 'answer': 'huggingface/transformers'}

```

On top of the answer, the pretrained model used here returned its confidence score, along with the start position and its end position in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html).
In addition to the answer, the pretrained model used here returned its confidence score, along with the start position and end position of the answer in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html).

To download and use any of the pretrained models on your given task, you just need to use those three lines of codes (PyTorch version):
To download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version:
```python
>>> from transformers import AutoTokenizer, AutoModel

@@ -104,7 +104,7 @@ To download and use any of the pretrained models on your given task, you just ne

>>> inputs = tokenizer("Hello world!", return_tensors="pt")
>>> outputs = model(**inputs)
```
or for TensorFlow:
And here is the equivalent code for TensorFlow:
```python
>>> from transformers import AutoTokenizer, TFAutoModel

@@ -115,9 +115,9 @@ or for TensorFlow:

>>> outputs = model(**inputs)
```

The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on one (or list) of texts (as we can see on the fourth line of both code examples). It will output a dictionary you can directly pass to your model (which is done on the fifth line).
The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on a single string (as in the above examples) or a list. It will output a dictionary that you can use in downstream code or simply directly pass to your model using the ** argument unpacking operator.
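
Because the tokenizer accepts either a single string or a list of strings, batching several texts only requires asking it to pad them to a common length; a small illustrative example (the checkpoint and sentences are arbitrary):

```python
>>> from transformers import AutoTokenizer, AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = AutoModel.from_pretrained("bert-base-uncased")

>>> # pad the batch so both sentences share the same length, then unpack the dict into the model
>>> batch = tokenizer(["Hello world!", "Transformers is great."], padding=True, return_tensors="pt")
>>> outputs = model(**batch)
```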

The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. For instance, [this tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model in classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune the on a new dataset.
The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. [This tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset.
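
For the `Trainer` route mentioned above, fine-tuning boils down to building a small dataset of encoded examples and handing it to the trainer; a schematic, minimal example (toy data, arbitrary checkpoint, one epoch):

```python
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

>>> # a toy training set: two labelled sentences, tokenized up front
>>> encodings = tokenizer(["I love this!", "I hate this!"], padding=True)
>>> labels = [1, 0]
>>> train_dataset = [{"input_ids": encodings["input_ids"][i], "attention_mask": encodings["attention_mask"][i], "labels": labels[i]} for i in range(2)]

>>> training_args = TrainingArguments(output_dir="test_trainer", num_train_epochs=1)
>>> trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
>>> trainer.train()
```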

## Why should I use transformers?

@@ -135,16 +135,16 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta

1. Choose the right framework for every part of a model's lifetime:
- Train state-of-the-art models in 3 lines of code.
- Move a single model between TF2.0/PyTorch frameworks at will (see the sketch after this list).
- Seamlessly pick the right framework for training, evaluation, production.
- Seamlessly pick the right framework for training, evaluation and production.

1. Easily customize a model or an example to your needs:
- Examples for each architecture to reproduce the results by the official authors of said architecture.
- Expose the models internal as consistently as possible.
- We provide examples for each architecture to reproduce the results published by its original authors.
- Model internals are exposed as consistently as possible.
- Model files can be used independently of the library for quick experiments.
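
That framework interchange is concrete: a checkpoint saved from a PyTorch class can be reloaded into the corresponding TensorFlow class with the `from_pt` flag (and `from_tf` works in the other direction). A small sketch, using an arbitrary checkpoint and output path:

```python
>>> from transformers import AutoModel, TFAutoModel

>>> pt_model = AutoModel.from_pretrained("bert-base-uncased")
>>> pt_model.save_pretrained("./bert-checkpoint")  # writes a PyTorch checkpoint to disk

>>> # reload the same weights into the TensorFlow implementation of the architecture
>>> tf_model = TFAutoModel.from_pretrained("./bert-checkpoint", from_pt=True)
```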

## Why shouldn't I use transformers?

- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving in additional abstractions/files.
- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
- The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library.
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.

@@ -152,16 +152,16 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta

### With pip

This repository is tested on Python 3.6+, PyTorch 1.0.0+ (PyTorch 1.3.1+ for [examples](https://github.com/huggingface/transformers/tree/master/examples)) and TensorFlow 2.0.
This repository is tested on Python 3.6+, Flax 0.3.2+, PyTorch 1.3.1+ and TensorFlow 2.3+.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

First, create a virtual environment with the version of Python you're going to use and activate it.

Then, you will need to install at least one of TensorFlow 2.0, PyTorch or Flax.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform and/or [Flax installation page](https://github.com/google/flax#quick-install).
Then, you will need to install at least one of Flax, PyTorch or TensorFlow.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax installation page](https://github.com/google/flax#quick-install) regarding the specific install command for your platform.

When TensorFlow 2.0 and/or PyTorch has been installed, 🤗 Transformers can be installed using pip as follows:
When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:

```bash
pip install transformers

@@ -179,9 +179,9 @@ Since Transformers version v4.0.0, we now have a conda channel: `huggingface`.

conda install -c huggingface transformers
```

Follow the installation pages of TensorFlow, PyTorch or Flax to see how to install them with conda.
Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda.

## Models architectures
## Model architectures

**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).

@@ -194,13 +194,19 @@ Current number of checkpoints:
1. **[BARThez](https://huggingface.co/transformers/model_doc/barthez.html)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
|
||||
1. **[BERT](https://huggingface.co/transformers/model_doc/bert.html)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/transformers/model_doc/bertgeneration.html)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/transformers/model_doc/bigbird.html)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-Pegasus](https://huggingface.co/transformers/model_doc/bigbird_pegasus.html)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](https://huggingface.co/transformers/model_doc/blenderbot.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/transformers/model_doc/blenderbot_small.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BORT](https://huggingface.co/transformers/model_doc/bort.html)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[CamemBERT](https://huggingface.co/transformers/model_doc/camembert.html)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CLIP](https://huggingface.co/transformers/model_doc/clip.html)** from (OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[ConvBERT](https://huggingface.co/transformers/model_doc/convbert.html)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[CPM](https://huggingface.co/transformers/model_doc/cpm.html)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
1. **[CTRL](https://huggingface.co/transformers/model_doc/ctrl.html)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
1. **[DeBERTa](https://huggingface.co/transformers/model_doc/deberta.html)** (from Microsoft Research) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa](https://huggingface.co/transformers/model_doc/deberta.html)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](https://huggingface.co/transformers/model_doc/deberta_v2.html)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeiT](https://huggingface.co/transformers/model_doc/deit.html)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
1. **[DialoGPT](https://huggingface.co/transformers/model_doc/dialogpt.html)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.
|
||||
1. **[DPR](https://huggingface.co/transformers/model_doc/dpr.html)** (from Facebook) released with the paper [Dense Passage Retrieval
|
||||
@ -211,32 +217,42 @@ Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[Funnel Transformer](https://huggingface.co/transformers/model_doc/funnel.html)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
1. **[GPT](https://huggingface.co/transformers/model_doc/gpt.html)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT-2](https://huggingface.co/transformers/model_doc/gpt2.html)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT Neo](https://huggingface.co/transformers/model_doc/gpt_neo.html)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
1. **[I-BERT](https://huggingface.co/transformers/model_doc/ibert.html)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer
|
||||
1. **[LayoutLM](https://huggingface.co/transformers/model_doc/layoutlm.html)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
|
||||
1. **[LED](https://huggingface.co/transformers/model_doc/led.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[Longformer](https://huggingface.co/transformers/model_doc/longformer.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LUKE](https://huggingface.co/transformers/model_doc/luke.html)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
|
||||
1. **[LXMERT](https://huggingface.co/transformers/model_doc/lxmert.html)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
|
||||
1. **[M2M100](https://huggingface.co/transformers/model_doc/m2m_100.html)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/transformers/model_doc/marian.html)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MBart](https://huggingface.co/transformers/model_doc/mbart.html)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[MBart-50](https://huggingface.co/transformers/model_doc/mbart.html)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
1. **[Megatron-BERT](https://huggingface.co/transformers/model_doc/megatron_bert.html)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[Megatron-GPT2](https://huggingface.co/transformers/model_doc/megatron_gpt2.html)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
1. **[MPNet](https://huggingface.co/transformers/model_doc/mpnet.html)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
|
||||
1. **[MT5](https://huggingface.co/transformers/model_doc/mt5.html)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
||||
1. **[Pegasus](https://huggingface.co/transformers/model_doc/pegasus.html)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777)> by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
|
||||
1. **[ProphetNet](https://huggingface.co/transformers/model_doc/prophetnet.html)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[Reformer](https://huggingface.co/transformers/model_doc/reformer.html)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
1. **[RoBERTa](https://huggingface.co/transformers/model_doc/roberta.html)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
|
||||
1. **[SpeechToTextTransformer](https://huggingface.co/transformers/model_doc/speech_to_text.html)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
|
||||
1. **[SqueezeBert](https://huggingface.co/transformers/model_doc/squeezebert.html)** released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
|
||||
1. **[T5](https://huggingface.co/transformers/model_doc/t5.html)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/transformers/model_doc/tapas.html)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[Transformer-XL](https://huggingface.co/transformers/model_doc/transformerxl.html)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/transformers/model_doc/vit.html)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[Wav2Vec2](https://huggingface.co/transformers/model_doc/wav2vec2.html)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[XLM](https://huggingface.co/transformers/model_doc/xlm.html)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/transformers/model_doc/xlmprophetnet.html)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/transformers/model_doc/xlmroberta.html)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
1. **[XLNet](https://huggingface.co/transformers/model_doc/xlnet.html)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/transformers/model_doc/xlsr_wav2vec2.html)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||
1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedbacks before starting your PR.
|
||||

To check if each model has an implementation in PyTorch/TensorFlow/Flax or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/transformers/index.html#bigtable)
To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/transformers/index.html#bigtable).

These implementations have been tested on several datasets (see the example scripts) and should match the performances of the original implementations. You can find more details on the performances in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).
These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).

## Learn more

@@ -53,7 +53,7 @@ RUN git clone https://github.com/huggingface/transformers.git && \

git checkout CI && \
cd .. && \
pip install ./transformers && \
pip install -r ./transformers/examples/requirements.txt && \
pip install -r ./transformers/examples/pytorch/_test_requirements.txt && \
pip install pytest

RUN python -c "import torch_xla; print(torch_xla.__version__)"

@@ -27,7 +27,7 @@ local bertBaseCased = base.BaseTest {

},
command: utils.scriptCommand(
|||
python -m pytest -s transformers/examples/test_xla_examples.py -v
python -m pytest -s transformers/examples/pytorch/test_xla_examples.py -v
test_exit_code=$?
echo "\nFinished running commands.\n"
test $test_exit_code -eq 0

@@ -26,7 +26,7 @@ pip install -e ".[docs]"

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing, for instance). You don't have to commit the built documentation.

---

@@ -65,7 +65,7 @@ make html

```

A folder called ``_build/html`` should have been created. You can now open the file ``_build/html/index.html`` in your
browser.

---
**NOTE**

@@ -95,15 +95,15 @@ following these steps:

expand them).
- Click on "details" next to the `ci/circleci: build_doc` check.
- In the new window, click on the "Artifacts" tab.
- Locate the file "docs/_build/html/index.html" (or any specific page you want to check) and click on it to get a
preview.

## Writing Documentation - Specification

The `huggingface/transformers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style. It is
mostly written in ReStructuredText
([Sphinx simple documentation](https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html),
[Sourceforge complete documentation](https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html)).

@@ -121,8 +121,8 @@ four.

### Adding a new model

When adding a new model:

- Create a file `xxx.rst` under `./source/model_doc` (don't hesitate to copy an existing file as template).
- Link that file in `./source/index.rst` on the `model_doc` toc-tree.
- Write a short overview of the model:
- Overview with paper & authors

@@ -130,8 +130,8 @@ When adding a new model:

- Tips and tricks and how to use it best
- Add the classes that should be linked in the model. This generally includes the configuration, the tokenizer, and
every model of that class (the base model, alongside models with additional heads), both in PyTorch and TensorFlow.
The order is generally:
- Configuration,
- Tokenizer
- PyTorch base model
- PyTorch head models

@@ -179,7 +179,7 @@ Links should be done as so (note the double underscore at the end): \`text for t

#### Defining arguments in a method

Arguments should be defined with the `Args:` prefix, followed by a line return and an indentation.
The argument should be followed by its type, with its shape if it is a tensor, and a line return.
Another indentation is necessary before writing the description of the argument.
@ -216,9 +216,9 @@ then its documentation should look like this:
|
||||
|
||||
Note that we always omit the "defaults to :obj:\`None\`" when None is the default for any argument. Also note that even
|
||||
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
|
||||
however write as many lines as you want in the indented description (see the example above with `input_ids`).
|
||||
however write as many lines as you want in the indented description (see the example above with `input_ids`).
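Putting these rules together, a minimal sketch of such a docstring could look like the following (the method and argument names here are made up for illustration, not the library's exact example):

```
def forward(self, input_ids, attention_mask=None):
    r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. The description can span
            as many indented lines as needed.
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on padding token indices.
    """
```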
|
||||
|
||||
#### Writing a multi-line code block
|
||||
#### Writing a multi-line code block
|
||||
|
||||
Multi-line code blocks can be useful for displaying examples. They are done like so:
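The concrete example is outside this excerpt; as a rough sketch (not the repository's exact wording), such a block in a docstring written in this style is introduced with an `Example::` marker followed by indented code:

```
Example::

    # the indented lines below the ``Example::`` marker form the code block
    from transformers import pipeline
    classifier = pipeline("sentiment-analysis")
```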
|
||||
|
||||
@ -237,7 +237,7 @@ the results stay consistent with the library.
|
||||
|
||||
#### Writing a return block
|
||||
|
||||
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
|
||||
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
|
||||
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
|
||||
building the return.
|
||||
|
||||
@ -258,3 +258,43 @@ Here's an example for a single value return:
|
||||
Returns:
|
||||
:obj:`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
|
||||
```
|
||||
|
||||
#### Adding a new section
|
||||
|
||||
In ReST, section headers are designated as such with the help of a line of underlying characters, e.g.:
|
||||
|
||||
```
|
||||
Section 1
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Sub-section 1
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
```
|
||||
|
||||
ReST allows the use of any characters to designate different section levels, as long as they are used consistently within the same document. For details see [sections doc](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections). Because there is no standard, different documents often end up using different characters for the same levels, which makes it very difficult to know which character to use when creating a new section.
|
||||
|
||||
Specifically, if when running `make docs` you get an error like:
|
||||
```
|
||||
docs/source/main_classes/trainer.rst:127:Title level inconsistent:
|
||||
```
|
||||
you picked an inconsistent character for some of the levels.
|
||||
|
||||
But how do you know which characters you must use for an already existing level or when adding a new level?
|
||||
|
||||
You can use this helper script:
|
||||
```
|
||||
perl -ne '/^(.)\1{100,}/ && do { $h{$1}=++$c if !$h{$1} }; END { %h = reverse %h ; print "$_ $h{$_}\n" for sort keys %h}' docs/source/main_classes/trainer.rst
|
||||
1 -
|
||||
2 ~
|
||||
3 ^
|
||||
4 =
|
||||
5 "
|
||||
```
|
||||
|
||||
This tells you which characters have already been assigned for each level.
|
||||
|
||||
So using this particular example's output -- if your current section's header uses `=` as its underline character, you now know you're at level 4, and if you want to add a sub-section header you know you want `"` since it'd be level 5.
|
||||
|
||||
If you need to add yet another sub-level, then pick a character that is not used already. That is, you must pick a character that is not in the output of that script.
|
||||
|
||||
Here is the full list of characters that can be used in this context: `= - ` : ' " ~ ^ _ * + # < >`
|
||||
|
||||
@ -1,10 +1,13 @@
|
||||
// These two things need to be updated at each release for the version selector.
|
||||
// Last stable version
|
||||
const stableVersion = "v4.2.0"
|
||||
const stableVersion = "v4.5.1"
|
||||
// Dictionary doc folder to label. The last stable version should have an empty key.
|
||||
const versionMapping = {
|
||||
"master": "master",
|
||||
"": "v4.2.0/v4.2.1 (stable)",
|
||||
"": "v4.5.0/v4.5.1 (stable)",
|
||||
"v4.4.2": "v4.4.0/v4.4.1/v4.4.2",
|
||||
"v4.3.3": "v4.3.0/v4.3.1/v4.3.2/v4.3.3",
|
||||
"v4.2.2": "v4.2.0/v4.2.1/v4.2.2",
|
||||
"v4.1.1": "v4.1.0/v4.1.1",
|
||||
"v4.0.1": "v4.0.0/v4.0.1",
|
||||
"v3.5.1": "v3.5.0/v3.5.1",
|
||||
@ -60,7 +63,7 @@ function addIcon() {
|
||||
function addCustomFooter() {
|
||||
const customFooter = document.createElement("div");
|
||||
const questionOrIssue = document.createElement("div");
|
||||
questionOrIssue.innerHTML = "Stuck? Read our <a href='https://medium.com/huggingface'>Blog posts</a> or <a href='https://github.com/huggingface/transformers'>Create an issue</a>";
|
||||
questionOrIssue.innerHTML = "Stuck? Read our <a href='https://huggingface.co/blog'>Blog posts</a> or <a href='https://github.com/huggingface/transformers'>Create an issue</a>";
|
||||
customFooter.appendChild(questionOrIssue);
|
||||
customFooter.classList.add("footer");
|
||||
|
||||
@ -127,11 +130,11 @@ function addVersionControl() {
|
||||
const parts = location.toString().split('/');
|
||||
let versionIndex = parts.length - 2;
|
||||
// Index page may not have a last part with filename.html so we need to go up
|
||||
if (parts[parts.length - 1] != "" && ! parts[parts.length - 1].match(/\.html$|^search.html?/)) {
|
||||
if (parts[parts.length - 1] != "" && ! parts[parts.length - 1].match(/\.html/)) {
|
||||
versionIndex = parts.length - 1;
|
||||
}
|
||||
// Main classes and models are nested so we need to go deeper
|
||||
else if (parts[versionIndex] == "main_classes" || parts[versionIndex] == "model_doc") {
|
||||
else if (parts[versionIndex] == "main_classes" || parts[versionIndex] == "model_doc" || parts[versionIndex] == "internal") {
|
||||
versionIndex = versionIndex - 1;
|
||||
}
|
||||
const version = parts[versionIndex];
|
||||
|
||||
@ -388,7 +388,7 @@ Next, you can finally start adding new code to 🤗 Transformers. Go into the cl
|
||||
|
||||
::
|
||||
|
||||
cd transformers
|
||||
cd transformers
|
||||
|
||||
In the special case that you are adding a model whose architecture exactly matches the model architecture of an
|
||||
existing model you only have to add a conversion script as described in `this section <#write-a-conversion-script>`__.
|
||||
@ -417,27 +417,27 @@ You should do the following:
|
||||
|
||||
::
|
||||
|
||||
git checkout -b add_brand_new_bert
|
||||
git checkout -b add_brand_new_bert
|
||||
|
||||
2. Commit the automatically generated code:
|
||||
|
||||
::
|
||||
|
||||
git add .
|
||||
git commit
|
||||
git add .
|
||||
git commit
|
||||
|
||||
3. Fetch and rebase to current master
|
||||
|
||||
::
|
||||
|
||||
git fetch upstream
|
||||
git rebase upstream/master
|
||||
git fetch upstream
|
||||
git rebase upstream/master
|
||||
|
||||
4. Push the changes to your account using:
|
||||
|
||||
::
|
||||
|
||||
git push -u origin a-descriptive-name-for-my-changes
|
||||
git push -u origin a-descriptive-name-for-my-changes
|
||||
|
||||
5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the
|
||||
GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for
|
||||
@ -451,8 +451,8 @@ time to time by doing:
|
||||
|
||||
::
|
||||
|
||||
git fetch upstream
|
||||
git merge upstream/master
|
||||
git fetch upstream
|
||||
git merge upstream/master
|
||||
|
||||
In general, all questions you might have regarding the model or your implementation should be asked in your PR and
|
||||
discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or
|
||||
|
||||
@ -65,10 +65,10 @@ respectively.
|
||||
.. code-block:: bash
|
||||
|
||||
## PYTORCH CODE
|
||||
python examples/benchmarking/run_benchmark.py --help
|
||||
python examples/pytorch/benchmarking/run_benchmark.py --help
|
||||
|
||||
## TENSORFLOW CODE
|
||||
python examples/benchmarking/run_benchmark_tf.py --help
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling ``benchmark.run()``.
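For instance, a minimal PyTorch benchmark run could look like the sketch below (the model name, batch size and sequence length are only placeholders):

.. code-block:: python

    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    # pick the checkpoints and input shapes to measure
    args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
    benchmark = PyTorchBenchmark(args)

    # runs the speed and memory measurements and returns a result object
    results = benchmark.run()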
|
||||
|
||||
@ -18,8 +18,8 @@ This page regroups resources around 🤗 Transformers developed by the community
|
||||
| [Fine-tune DialoGPT on New Datasets and Languages](https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots | [Nathan Cooper](https://github.com/ncoop57) | [](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) |
|
||||
| [Long Sequence Modeling with Reformer](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | How to train on sequences as long as 500,000 tokens with Reformer | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) |
|
||||
| [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | How to fine-tune BART for summarization with fastai using blurr | [Wayde Gilliam](https://ohmeow.com/) | [](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) |
|
||||
| [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | How to generate tweets in the style of your favorite Twitter account by fine-tune a GPT-2 model | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) |
|
||||
| [A Step by Step Guide to Tracking Hugging Face Model Performance](https://colab.research.google.com/drive/1NEiqNPhiouu2pPwDAVeFoN4-vTYMz9F8) | A quick tutorial for training NLP models with HuggingFace and visualizing their performance with Weights & Biases | [Jack Morris](https://github.com/jxmorris12) | [](https://colab.research.google.com/drive/1NEiqNPhiouu2pPwDAVeFoN4-vTYMz9F8) |
|
||||
| [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) |
|
||||
| [Optimize 🤗 Hugging Face models with Weights & Biases](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | A complete tutorial showcasing W&B integration with Hugging Face | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) |
|
||||
| [Pretrain Longformer](https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | How to build a "long" version of existing pretrained models | [Iz Beltagy](https://beltagy.net) | [](https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) |
|
||||
| [Fine-tune Longformer for QA](https://github.com/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | How to fine-tune longformer model for QA task | [Suraj Patil](https://github.com/patil-suraj) | [](https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) |
|
||||
| [Evaluate Model with 🤗nlp](https://github.com/patrickvonplaten/notebooks/blob/master/How_to_evaluate_Longformer_on_TriviaQA_using_NLP.ipynb) | How to evaluate longformer on TriviaQA with `nlp` | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/drive/1m7eTGlPmLRgoPkkA7rkhQdZ9ydpmsdLE?usp=sharing) |
|
||||
@ -30,6 +30,7 @@ This page regroups resources around 🤗 Transformers developed by the community
|
||||
|[Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing](https://github.com/ELS-RD/transformers-notebook/blob/master/Divide_Hugging_Face_Transformers_training_time_by_2_or_more.ipynb)|How to speed up fine-tuning by a factor of 2 using dynamic padding / bucketing|[Michael Benesty](https://github.com/pommedeterresautee) |[](https://colab.research.google.com/drive/1CBfRU1zbfu7-ijiOqAAQUA-RJaxfcJoO?usp=sharing)|
|
||||
|[Pretrain Reformer for Masked Language Modeling](https://github.com/patrickvonplaten/notebooks/blob/master/Reformer_For_Masked_LM.ipynb)| How to train a Reformer model with bi-directional self-attention layers | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/drive/1tzzh0i8PgDQGV3SMFUGxM7_gGae3K-uW?usp=sharing)|
|
||||
|[Expand and Fine Tune Sci-BERT](https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/COVID-SciBERT.ipynb)| How to increase vocabulary of a pretrained SciBERT model from AllenAI on the CORD dataset and pipeline it. | [Tanmay Thakur](https://github.com/lordtt13) | [](https://colab.research.google.com/drive/1rqAR40goxbAfez1xvF3hBJphSCsvXmh8)|
|
||||
|[Fine Tune BlenderBotSmall for Summarization using the Trainer API](https://github.com/lordtt13/transformers-experiments/blob/master/Custom%20Tasks/fine-tune-blenderbot_small-for-summarization.ipynb)| How to fine tune BlenderBotSmall for summarization on a custom dataset, using the Trainer API. | [Tanmay Thakur](https://github.com/lordtt13) | [](https://colab.research.google.com/drive/19Wmupuls7mykSGyRN_Qo6lPQhgp56ymq?usp=sharing)|
|
||||
|[Fine-tune Electra and interpret with Integrated Gradients](https://github.com/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb) | How to fine-tune Electra for sentiment analysis and interpret predictions with Captum Integrated Gradients | [Eliza Szczechla](https://elsanns.github.io) | [](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb)|
|
||||
|[fine-tune a non-English GPT-2 Model with Trainer class](https://github.com/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb) | How to fine-tune a non-English GPT-2 Model with Trainer class | [Philipp Schmid](https://www.philschmid.de) | [](https://colab.research.google.com/github/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb)|
|
||||
|[Fine-tune a DistilBERT Model for Multi Label Classification task](https://github.com/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb) | How to fine-tune a DistilBERT Model for Multi Label Classification task | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [](https://colab.research.google.com/github/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb)|
|
||||
@ -47,3 +48,11 @@ This page regroups resources around 🤗 Transformers developed by the community
|
||||
|[Fine-Tune LED on up to 8K tokens](https://github.com/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb) | How to fine-tune LED on pubmed for long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb)|
|
||||
|[Evaluate LED on Arxiv](https://github.com/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb) | How to effectively evaluate LED on long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb)|
|
||||
|[Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb) | How to fine-tune *LayoutLMForSequenceClassification* on the RVL-CDIP dataset for scanned document classification | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb)|
|
||||
|[Wav2Vec2 CTC decoding with GPT2 adjustment](https://github.com/voidful/huggingface_notebook/blob/main/xlsr_gpt.ipynb) | How to decode CTC sequence with language model adjustment | [Eric Lam](https://github.com/voidful) | [](https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing)|
|
||||
|[Fine-tune BART for summarization in two languages with Trainer class](https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb) | How to fine-tune BART for summarization in two languages with Trainer class | [Eliza Szczechla](https://github.com/elsanns) | [](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb)|
|
||||
|[Evaluate Big Bird on Trivia QA](https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb) | How to evaluate BigBird on long document question answering on Trivia QA | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb)|
|
||||
| [Create video captions using Wav2Vec2](https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | How to create YouTube captions from any video by transcribing the audio with Wav2Vec | [Niklas Muennighoff](https://github.com/Muennighoff) |[](https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) |
|
||||
| [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | How to evaluate *LukeForEntityClassification* on the Open Entity dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) |
|
||||
| [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | How to evaluate *LukeForEntityPairClassification* on the TACRED dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) |
|
||||
| [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | How to evaluate *LukeForEntitySpanClassification* on the CoNLL-2003 dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) |
|
||||
| [Evaluate BigBird-Pegasus on PubMed dataset](https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | How to evaluate *BigBirdPegasusForConditionalGeneration* on PubMed dataset | [Vasudev Gupta](https://github.com/vasudevgupta7) | [](https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) |
|
||||
|
||||
@ -14,21 +14,24 @@
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.abspath('../../src'))
|
||||
|
||||
sys.path.insert(0, os.path.abspath("../../src"))
|
||||
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
|
||||
project = u'transformers'
|
||||
copyright = u'2020, The Hugging Face Team, Licenced under the Apache License, Version 2.0'
|
||||
author = u'huggingface'
|
||||
project = "transformers"
|
||||
copyright = "2020, The Hugging Face Team, Licenced under the Apache License, Version 2.0"
|
||||
author = "huggingface"
|
||||
|
||||
# The short X.Y version
|
||||
version = u''
|
||||
version = ""
|
||||
# The full version, including alpha/beta/rc tags
|
||||
release = u'4.3.0'
|
||||
release = "4.5.0.dev0"
|
||||
|
||||
|
||||
# Prefix link to point to master, comment this during version release and uncomment below line
|
||||
extlinks = {'prefix_link': ('https://github.com/huggingface/transformers/blob/master/%s', '')}
|
||||
extlinks = {"prefix_link": ("https://github.com/huggingface/transformers/blob/master/%s", "")}
|
||||
# Prefix link to always point to corresponding version, uncomment this during version release
|
||||
# extlinks = {'prefix_link': ('https://github.com/huggingface/transformers/blob/v'+ release + '/%s', '')}
|
||||
|
||||
@ -42,27 +45,28 @@ extlinks = {'prefix_link': ('https://github.com/huggingface/transformers/blob/ma
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.extlinks',
|
||||
'sphinx.ext.coverage',
|
||||
'sphinx.ext.napoleon',
|
||||
'recommonmark',
|
||||
'sphinx.ext.viewcode',
|
||||
'sphinx_markdown_tables',
|
||||
'sphinx_copybutton'
|
||||
"sphinx.ext.autodoc",
|
||||
"sphinx.ext.extlinks",
|
||||
"sphinx.ext.coverage",
|
||||
"sphinx.ext.napoleon",
|
||||
"recommonmark",
|
||||
"sphinx.ext.viewcode",
|
||||
"sphinx_markdown_tables",
|
||||
"sphinxext.opengraph",
|
||||
"sphinx_copybutton",
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
source_suffix = ['.rst', '.md']
|
||||
source_suffix = [".rst", ".md"]
|
||||
# source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
master_doc = "index"
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
@ -74,7 +78,7 @@ language = None
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This pattern also affects html_static_path and html_extra_path.
|
||||
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
|
||||
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = None
|
||||
@ -88,20 +92,30 @@ copybutton_prompt_is_regexp = True
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme = "sphinx_rtd_theme"
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
html_theme_options = {
|
||||
'analytics_id': 'UA-83738774-2'
|
||||
}
|
||||
html_theme_options = {"analytics_id": "UA-83738774-2", "navigation_with_keys": True}
|
||||
|
||||
# Configuration for OpenGraph and Twitter Card Tags.
|
||||
# These are responsible for creating nice shareable social images https://ahrefs.com/blog/open-graph-meta-tags/
|
||||
# https://ogp.me/#type_website
|
||||
ogp_image = "https://huggingface.co/front/thumbnails/transformers.png"
|
||||
ogp_description = "State-of-the-art Natural Language Processing for PyTorch and TensorFlow 2.0. Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation, etc in 100+ languages. Its aim is to make cutting-edge NLP easier to use for everyone"
|
||||
ogp_description_length = 160
|
||||
|
||||
ogp_custom_meta_tags = [
|
||||
f'<meta name="twitter:image" content="{ogp_image}">',
|
||||
f'<meta name="twitter:description" content="{ogp_description}">',
|
||||
]
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
html_static_path = ["_static"]
|
||||
|
||||
# Custom sidebar templates, must be a dictionary that maps document names
|
||||
# to template names.
|
||||
@ -113,17 +127,17 @@ html_static_path = ['_static']
|
||||
#
|
||||
# html_sidebars = {}
|
||||
|
||||
# This must be the name of an image file (path relative to the configuration
|
||||
# directory) that is the favicon of the docs. Modern browsers use this as
|
||||
# the icon for tabs, windows and bookmarks. It should be a Windows-style
|
||||
# This must be the name of an image file (path relative to the configuration
|
||||
# directory) that is the favicon of the docs. Modern browsers use this as
|
||||
# the icon for tabs, windows and bookmarks. It should be a Windows-style
|
||||
# icon file (.ico).
|
||||
html_favicon = 'favicon.ico'
|
||||
html_favicon = "favicon.ico"
|
||||
|
||||
|
||||
# -- Options for HTMLHelp output ---------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'transformersdoc'
|
||||
htmlhelp_basename = "transformersdoc"
|
||||
|
||||
|
||||
# -- Options for LaTeX output ------------------------------------------------
|
||||
@ -132,15 +146,12 @@ latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
@ -150,8 +161,7 @@ latex_elements = {
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'transformers.tex', u'transformers Documentation',
|
||||
u'huggingface', 'manual'),
|
||||
(master_doc, "transformers.tex", "transformers Documentation", "huggingface", "manual"),
|
||||
]
|
||||
|
||||
|
||||
@ -159,10 +169,7 @@ latex_documents = [
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'transformers', u'transformers Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
man_pages = [(master_doc, "transformers", "transformers Documentation", [author], 1)]
|
||||
|
||||
|
||||
# -- Options for Texinfo output ----------------------------------------------
|
||||
@ -171,9 +178,15 @@ man_pages = [
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'transformers', u'transformers Documentation',
|
||||
author, 'transformers', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
(
|
||||
master_doc,
|
||||
"transformers",
|
||||
"transformers Documentation",
|
||||
author,
|
||||
"transformers",
|
||||
"One line description of project.",
|
||||
"Miscellaneous",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@ -192,11 +205,13 @@ epub_title = project
|
||||
# epub_uid = ''
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
epub_exclude_files = ['search.html']
|
||||
epub_exclude_files = ["search.html"]
|
||||
|
||||
|
||||
def setup(app):
|
||||
app.add_css_file('css/huggingface.css')
|
||||
app.add_css_file('css/code-snippets.css')
|
||||
app.add_js_file('js/custom.js')
|
||||
app.add_css_file("css/huggingface.css")
|
||||
app.add_css_file("css/code-snippets.css")
|
||||
app.add_js_file("js/custom.js")
|
||||
|
||||
|
||||
# -- Extension configuration -------------------------------------------------
|
||||
|
||||
@ -33,8 +33,8 @@ You can convert any TensorFlow checkpoint for BERT (in particular `the pre-train
|
||||
This CLI takes as input a TensorFlow checkpoint (three files starting with ``bert_model.ckpt``\ ) and the associated
|
||||
configuration file (\ ``bert_config.json``\ ), and creates a PyTorch model for this configuration, loads the weights
|
||||
from the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that
|
||||
can be imported using ``from_pretrained()`` (see example in :doc:`quicktour` , `run_glue.py
|
||||
<https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py>`_\ ).
|
||||
can be imported using ``from_pretrained()`` (see example in :doc:`quicktour` , :prefix_link:`run_glue.py
|
||||
<examples/pytorch/text-classification/run_glue.py>` \ ).
|
||||
|
||||
You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
|
||||
checkpoint (the three files starting with ``bert_model.ckpt``\ ) but be sure to keep the configuration file (\
|
||||
@ -47,12 +47,12 @@ Here is an example of the conversion process for a pre-trained ``BERT-Base Uncas
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
|
||||
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
|
||||
|
||||
transformers-cli convert --model_type bert \
|
||||
--tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
|
||||
--config $BERT_BASE_DIR/bert_config.json \
|
||||
--pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
|
||||
transformers-cli convert --model_type bert \
|
||||
--tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
|
||||
--config $BERT_BASE_DIR/bert_config.json \
|
||||
--pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
|
||||
|
||||
You can download Google's pre-trained models for the conversion `here
|
||||
<https://github.com/google-research/bert#pre-trained-models>`__.
|
||||
@ -72,12 +72,12 @@ Here is an example of the conversion process for the pre-trained ``ALBERT Base``
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export ALBERT_BASE_DIR=/path/to/albert/albert_base
|
||||
export ALBERT_BASE_DIR=/path/to/albert/albert_base
|
||||
|
||||
transformers-cli convert --model_type albert \
|
||||
--tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
|
||||
--config $ALBERT_BASE_DIR/albert_config.json \
|
||||
--pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
|
||||
transformers-cli convert --model_type albert \
|
||||
--tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
|
||||
--config $ALBERT_BASE_DIR/albert_config.json \
|
||||
--pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
|
||||
|
||||
You can download Google's pre-trained models for the conversion `here
|
||||
<https://github.com/google-research/albert#pre-trained-models>`__.
|
||||
@ -91,13 +91,13 @@ save as the same format than OpenAI pretrained model (see `here <https://github.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights
|
||||
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights
|
||||
|
||||
transformers-cli convert --model_type gpt \
|
||||
--tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config OPENAI_GPT_CONFIG] \
|
||||
[--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \
|
||||
transformers-cli convert --model_type gpt \
|
||||
--tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config OPENAI_GPT_CONFIG] \
|
||||
[--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \
|
||||
|
||||
|
||||
OpenAI GPT-2
|
||||
@ -108,13 +108,13 @@ Here is an example of the conversion process for a pre-trained OpenAI GPT-2 mode
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights
|
||||
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights
|
||||
|
||||
transformers-cli convert --model_type gpt2 \
|
||||
--tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config OPENAI_GPT2_CONFIG] \
|
||||
[--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
|
||||
transformers-cli convert --model_type gpt2 \
|
||||
--tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config OPENAI_GPT2_CONFIG] \
|
||||
[--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
|
||||
|
||||
Transformer-XL
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@ -124,13 +124,13 @@ Here is an example of the conversion process for a pre-trained Transformer-XL mo
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint
|
||||
export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint
|
||||
|
||||
transformers-cli convert --model_type transfo_xl \
|
||||
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config TRANSFO_XL_CONFIG] \
|
||||
[--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]
|
||||
transformers-cli convert --model_type transfo_xl \
|
||||
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--config TRANSFO_XL_CONFIG] \
|
||||
[--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]
|
||||
|
||||
|
||||
XLNet
|
||||
@ -140,14 +140,14 @@ Here is an example of the conversion process for a pre-trained XLNet model:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
|
||||
export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config
|
||||
export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
|
||||
export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config
|
||||
|
||||
transformers-cli convert --model_type xlnet \
|
||||
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
|
||||
--config $TRANSFO_XL_CONFIG_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--finetuning_task_name XLNET_FINETUNED_TASK] \
|
||||
transformers-cli convert --model_type xlnet \
|
||||
--tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
|
||||
--config $TRANSFO_XL_CONFIG_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
|
||||
[--finetuning_task_name XLNET_FINETUNED_TASK] \
|
||||
|
||||
|
||||
XLM
|
||||
@ -157,13 +157,13 @@ Here is an example of the conversion process for a pre-trained XLM model:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint
|
||||
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint
|
||||
|
||||
transformers-cli convert --model_type xlm \
|
||||
--tf_checkpoint $XLM_CHECKPOINT_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT
|
||||
[--config XML_CONFIG] \
|
||||
[--finetuning_task_name XML_FINETUNED_TASK]
|
||||
transformers-cli convert --model_type xlm \
|
||||
--tf_checkpoint $XLM_CHECKPOINT_PATH \
|
||||
--pytorch_dump_output $PYTORCH_DUMP_OUTPUT
|
||||
[--config XML_CONFIG] \
|
||||
[--finetuning_task_name XML_FINETUNED_TASK]
|
||||
|
||||
|
||||
T5
|
||||
@ -173,9 +173,9 @@ Here is an example of the conversion process for a pre-trained T5 model:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
export T5=/path/to/t5/uncased_L-12_H-768_A-12
|
||||
export T5=/path/to/t5/uncased_L-12_H-768_A-12
|
||||
|
||||
transformers-cli convert --model_type t5 \
|
||||
--tf_checkpoint $T5/t5_model.ckpt \
|
||||
--config $T5/t5_config.json \
|
||||
--pytorch_dump_output $T5/pytorch_model.bin
|
||||
transformers-cli convert --model_type t5 \
|
||||
--tf_checkpoint $T5/t5_model.ckpt \
|
||||
--config $T5/t5_config.json \
|
||||
--pytorch_dump_output $T5/pytorch_model.bin
|
||||
|
||||
@ -15,10 +15,10 @@ Fine-tuning with custom datasets
|
||||
|
||||
.. note::
|
||||
|
||||
The datasets used in this tutorial are available and can be more easily accessed using the `🤗 NLP library
|
||||
<https://github.com/huggingface/nlp>`_. We do not use this library to access the datasets here since this tutorial
|
||||
meant to illustrate how to work with your own data. A brief of introduction can be found at the end of the tutorial
|
||||
in the section ":ref:`nlplib`".
|
||||
The datasets used in this tutorial are available and can be more easily accessed using the `🤗 Datasets library
|
||||
<https://github.com/huggingface/datasets>`_. We do not use this library to access the datasets here since this
|
||||
tutorial is meant to illustrate how to work with your own data. A brief introduction can be found at the end of the
|
||||
tutorial in the section ":ref:`datasetslib`".
|
||||
|
||||
This tutorial will take you through several examples of using 🤗 Transformers models with your own datasets. The guide
|
||||
shows one of many valid workflows for using these models and is meant to be illustrative rather than definitive. We
|
||||
@ -41,7 +41,7 @@ Sequence Classification with IMDb Reviews
|
||||
.. note::
|
||||
|
||||
This dataset can be explored in the Hugging Face model hub (`IMDb <https://huggingface.co/datasets/imdb>`_), and
|
||||
can be alternatively downloaded with the 🤗 NLP library with ``load_dataset("imdb")``.
|
||||
can be alternatively downloaded with the 🤗 Datasets library with ``load_dataset("imdb")``.
|
||||
|
||||
In this example, we'll show how to download, tokenize, and train a model on the IMDb reviews dataset. This task takes
|
||||
the text of a review and requires the model to predict whether the sentiment of the review is positive or negative.
|
||||
@ -260,7 +260,7 @@ Token Classification with W-NUT Emerging Entities
|
||||
.. note::
|
||||
|
||||
This dataset can be explored in the Hugging Face model hub (`WNUT-17 <https://huggingface.co/datasets/wnut_17>`_),
|
||||
and can be alternatively downloaded with the 🤗 NLP library with ``load_dataset("wnut_17")``.
|
||||
and can be alternatively downloaded with the 🤗 Datasets library with ``load_dataset("wnut_17")``.
|
||||
|
||||
Next we will look at token classification. Rather than classifying an entire sequence, this task classifies token by
|
||||
token. We'll demonstrate how to do this with `Named Entity Recognition
|
||||
@ -459,7 +459,7 @@ Question Answering with SQuAD 2.0
|
||||
.. note::
|
||||
|
||||
This dataset can be explored in the Hugging Face model hub (`SQuAD V2
|
||||
<https://huggingface.co/datasets/squad_v2>`_), and can be alternatively downloaded with the 🤗 NLP library with
|
||||
<https://huggingface.co/datasets/squad_v2>`_), and can be alternatively downloaded with the 🤗 Datasets library with
|
||||
``load_dataset("squad_v2")``.
|
||||
|
||||
Question answering comes in many forms. In this example, we'll look at the particular type of extractive QA that
|
||||
@ -558,15 +558,14 @@ we can use the built in :func:`~transformers.BatchEncoding.char_to_token` method
|
||||
end_positions = []
|
||||
for i in range(len(answers)):
|
||||
start_positions.append(encodings.char_to_token(i, answers[i]['answer_start']))
|
||||
end_positions.append(encodings.char_to_token(i, answers[i]['answer_end']))
|
||||
end_positions.append(encodings.char_to_token(i, answers[i]['answer_end'] - 1))
|
||||
|
||||
# if start position is None, the answer passage has been truncated
|
||||
if start_positions[-1] is None:
|
||||
start_positions[-1] = tokenizer.model_max_length
|
||||
|
||||
# if end position is None, the 'char_to_token' function points to the space before the correct token - > add + 1
|
||||
if end_positions[-1] is None:
|
||||
end_positions[-1] = encodings.char_to_token(i, answers[i]['answer_end'] + 1)
|
||||
end_positions[-1] = tokenizer.model_max_length
|
||||
|
||||
encodings.update({'start_positions': start_positions, 'end_positions': end_positions})
|
||||
|
||||
add_token_positions(train_encodings, train_answers)
|
||||
@ -678,22 +677,23 @@ Additional Resources
|
||||
- :doc:`Preprocessing <preprocessing>`. Docs page on data preprocessing.
|
||||
- :doc:`Training <training>`. Docs page on training and fine-tuning.
|
||||
|
||||
.. _nlplib:
|
||||
.. _datasetslib:
|
||||
|
||||
Using the 🤗 NLP Datasets & Metrics library
|
||||
Using the 🤗 Datasets & Metrics library
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This tutorial demonstrates how to read in datasets from various raw text formats and prepare them for training with 🤗
|
||||
Transformers so that you can do the same thing with your own custom datasets. However, we recommend users use the `🤗
|
||||
NLP library <https://github.com/huggingface/nlp>`_ for working with the 150+ datasets included in the `hub
|
||||
Datasets library <https://github.com/huggingface/datasets>`_ for working with the 150+ datasets included in the `hub
|
||||
<https://huggingface.co/datasets>`_, including the three datasets used in this tutorial. As a very brief overview, we
|
||||
will show how to use the NLP library to download and prepare the IMDb dataset from the first example, :ref:`seq_imdb`.
|
||||
will show how to use the Datasets library to download and prepare the IMDb dataset from the first example,
|
||||
:ref:`seq_imdb`.
|
||||
|
||||
Start by downloading the dataset:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from nlp import load_dataset
|
||||
from datasets import load_dataset
|
||||
train = load_dataset("imdb", split="train")
|
||||
|
||||
Each dataset has multiple columns corresponding to different features. Let's see what our columns are.
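For instance, the column names can be read directly off the dataset object (for IMDb these are a text column and a label column):

.. code-block:: python

    >>> train.column_names  # e.g. ['text', 'label'] for the IMDb training split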
|
||||
@ -725,5 +725,5 @@ dataset elements.
|
||||
>>> {key: val.shape for key, val in train[0].items()})
|
||||
{'labels': TensorShape([]), 'input_ids': TensorShape([512]), 'attention_mask': TensorShape([512])}
|
||||
|
||||
We now have a fully-prepared dataset. Check out `the 🤗 NLP docs <https://huggingface.co/nlp/processing.html>`_ for a
|
||||
more thorough introduction.
|
||||
We now have a fully-prepared dataset. Check out `the 🤗 Datasets docs
|
||||
<https://huggingface.co/docs/datasets/processing.html>`_ for a more thorough introduction.
|
||||
|
||||
295
docs/source/debugging.rst
Normal file
@ -0,0 +1,295 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
|
||||
|
||||
Debugging
|
||||
=======================================================================================================================
|
||||
|
||||
Underflow and Overflow Detection
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. note::
|
||||
|
||||
This feature is currently available for PyTorch only.
|
||||
|
||||
.. note::
|
||||
|
||||
This feature can be used with any ``nn.Module``-based model.
|
||||
|
||||
If you start getting ``loss=NaN`` or the model exhibits some other abnormal behavior due to ``inf`` or ``nan`` in
|
||||
activations or weights, you need to discover where the first underflow or overflow happens and what led to it. Luckily,
|
||||
you can accomplish that easily by activating a special module that will do the detection automatically.
|
||||
|
||||
If you're using :class:`~transformers.Trainer`, you just need to add:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
--debug underflow_overflow
|
||||
|
||||
to the normal command line arguments, or pass ``debug="underflow_overflow"`` when creating the
|
||||
:class:`~transformers.TrainingArguments` object.
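For example, a minimal sketch of the latter (``output_dir`` is just a placeholder here):

.. code-block:: python

    from transformers import TrainingArguments

    # enables the underflow/overflow detector for Trainer-based training
    args = TrainingArguments(output_dir="output", debug="underflow_overflow")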
|
||||
|
||||
If you're using your own training loop or another Trainer you can accomplish the same with:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.debug_utils import DebugUnderflowOverflow
|
||||
debug_overflow = DebugUnderflowOverflow(model)
|
||||
|
||||
:class:`~transformers.debug_utils.DebugUnderflowOverflow` inserts hooks into the model that, immediately after each
|
||||
forward call, test the input and output variables as well as the corresponding module's weights. As soon as ``inf`` or
|
||||
``nan`` is detected in at least one element of the activations or weights, the program will assert and print a report
|
||||
like this (this was caught with ``google/mt5-small`` under fp16 mixed precision):
|
||||
|
||||
.. code-block::
|
||||
|
||||
Detected inf/nan during batch_number=0
|
||||
Last 21 forward frames:
|
||||
abs min abs max metadata
|
||||
encoder.block.1.layer.1.DenseReluDense.dropout Dropout
|
||||
0.00e+00 2.57e+02 input[0]
|
||||
0.00e+00 2.85e+02 output
|
||||
[...]
|
||||
encoder.block.2.layer.0 T5LayerSelfAttention
|
||||
6.78e-04 3.15e+03 input[0]
|
||||
2.65e-04 3.42e+03 output[0]
|
||||
None output[1]
|
||||
2.25e-01 1.00e+04 output[2]
|
||||
encoder.block.2.layer.1.layer_norm T5LayerNorm
|
||||
8.69e-02 4.18e-01 weight
|
||||
2.65e-04 3.42e+03 input[0]
|
||||
1.79e-06 4.65e+00 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
|
||||
2.17e-07 4.50e+00 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
2.68e-06 3.70e+01 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
|
||||
8.08e-07 2.66e+01 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
1.27e-04 2.37e+02 output
|
||||
encoder.block.2.layer.1.DenseReluDense.dropout Dropout
|
||||
0.00e+00 8.76e+03 input[0]
|
||||
0.00e+00 9.74e+03 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wo Linear
|
||||
1.01e-06 6.44e+00 weight
|
||||
0.00e+00 9.74e+03 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.dropout Dropout
|
||||
3.18e-04 6.27e+04 input[0]
|
||||
0.00e+00 inf output
|
||||
|
||||
The example output has been trimmed in the middle for brevity.
|
||||
|
||||
The second column shows the value of the absolute largest element, so if you have a closer look at the last few frames,
|
||||
the inputs and outputs were in the range of ``1e4``. So when this training was done under fp16 mixed precision the very
|
||||
last step overflowed (since under ``fp16`` the largest number before ``inf`` is ``64e3``). To avoid overflows under
|
||||
``fp16`` the activations must remain way below ``1e4``, because ``1e4 * 1e4 = 1e8`` so any matrix multiplication with
|
||||
large activations is going to lead to a numerical overflow condition.
|
||||
|
||||
At the very start of the trace you can discover at which batch number the problem occurred (here ``Detected inf/nan
|
||||
during batch_number=0`` means the problem occurred on the first batch).
|
||||
|
||||
Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting
|
||||
for. If we look just at this frame:
|
||||
|
||||
.. code-block::
|
||||
|
||||
encoder.block.2.layer.1.layer_norm T5LayerNorm
|
||||
8.69e-02 4.18e-01 weight
|
||||
2.65e-04 3.42e+03 input[0]
|
||||
1.79e-06 4.65e+00 output
|
||||
|
||||
Here, ``encoder.block.2.layer.1.layer_norm`` indicates that it was a layer norm for the first layer of the second
|
||||
block of the encoder, and that the specific ``forward`` call was made by a ``T5LayerNorm`` module.
|
||||
|
||||
Let's look at the last few frames of that report:
|
||||
|
||||
.. code-block::
|
||||
|
||||
Detected inf/nan during batch_number=0
|
||||
Last 21 forward frames:
|
||||
abs min abs max metadata
|
||||
[...]
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
|
||||
2.17e-07 4.50e+00 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
2.68e-06 3.70e+01 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
|
||||
8.08e-07 2.66e+01 weight
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
1.27e-04 2.37e+02 output
|
||||
encoder.block.2.layer.1.DenseReluDense.wo Linear
|
||||
1.01e-06 6.44e+00 weight
|
||||
0.00e+00 9.74e+03 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
|
||||
1.79e-06 4.65e+00 input[0]
|
||||
3.18e-04 6.27e+04 output
|
||||
encoder.block.2.layer.1.dropout Dropout
|
||||
3.18e-04 6.27e+04 input[0]
|
||||
0.00e+00 inf output
|
||||
|
||||
The last frame reports on the ``Dropout.forward`` function, with the first entry for the only input and the second for the
|
||||
only output. You can see that it was called from the ``dropout`` attribute inside the ``DenseReluDense`` class. We can see
|
||||
that it happened during the first layer of the second block, during the very first batch. Finally, the largest absolute
|
||||
input element was ``6.27e+04`` and the same for the output was ``inf``.
|
||||
|
||||
You can see here that ``T5DenseGatedGeluDense.forward`` resulted in output activations whose absolute max value was
|
||||
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have ``Dropout``, which renormalizes
|
||||
the output after it zeroes out some of the elements, which pushes the absolute max value above 64K, and we get an
|
||||
overflow (``inf``).
|
||||
|
||||
As you can see, it's the previous frames that we need to look into when the numbers start getting very large for fp16
|
||||
numbers.
|
||||
|
||||
Let's match the report to the code from ``models/t5/modeling_t5.py``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class T5DenseGatedGeluDense(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
|
||||
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
|
||||
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
|
||||
self.dropout = nn.Dropout(config.dropout_rate)
|
||||
self.gelu_act = ACT2FN["gelu_new"]
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
|
||||
hidden_linear = self.wi_1(hidden_states)
|
||||
hidden_states = hidden_gelu * hidden_linear
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.wo(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
Now it's easy to see the ``dropout`` call, and all the previous calls as well.
|
||||
|
||||
Since the detection is happening in a forward hook, these reports are printed immediately after each ``forward``
|
||||
returns.
|
||||
|
||||
Going back to the full report, to act on it and to fix the problem, we need to go up a few frames to where the numbers
|
||||
started to grow, and most likely switch to ``fp32`` mode there, so that the numbers don't overflow when multiplied
|
||||
or summed up. Of course, there might be other solutions. For example, we could turn off ``amp`` temporarily if it's
|
||||
enabled, after moving the original ``forward`` into a helper wrapper, like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def _forward(self, hidden_states):
|
||||
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
|
||||
hidden_linear = self.wi_1(hidden_states)
|
||||
hidden_states = hidden_gelu * hidden_linear
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.wo(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
import torch
|
||||
def forward(self, hidden_states):
|
||||
if torch.is_autocast_enabled():
|
||||
with torch.cuda.amp.autocast(enabled=False):
|
||||
return self._forward(hidden_states)
|
||||
else:
|
||||
return self._forward(hidden_states)
|
||||
|
||||
Since the automatic detector only reports on inputs and outputs of full frames, once you know where to look, you may
|
||||
want to analyse the intermediary stages of any specific ``forward`` function as well. In such a case you can use the
|
||||
``detect_overflow`` helper function to inject the detector where you want it, for example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.debug_utils import detect_overflow
|
||||
|
||||
class T5LayerFF(nn.Module):
|
||||
[...]
|
||||
def forward(self, hidden_states):
|
||||
forwarded_states = self.layer_norm(hidden_states)
|
||||
detect_overflow(forwarded_states, "after layer_norm")
|
||||
forwarded_states = self.DenseReluDense(forwarded_states)
|
||||
detect_overflow(forwarded_states, "after DenseReluDense")
|
||||
return hidden_states + self.dropout(forwarded_states)
|
||||
|
||||
You can see that we added 2 of these, and now we track whether ``inf`` or ``nan`` was detected for ``forwarded_states``
|
||||
somewhere in between.
|
||||
|
||||
Actually, the detector already reports these because each of the calls in the example above is an ``nn.Module``, but
|
||||
if you had some local direct calculations, this is how you'd do that.
|
||||
|
||||
Additionally, if you're instantiating the debugger in your own code, you can adjust the number of frames printed from
|
||||
its default, e.g.:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.debug_utils import DebugUnderflowOverflow
|
||||
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
|
||||
|
||||
Specific batch absolute min and max value tracing
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The same debugging class can be used for per-batch tracing with the underflow/overflow detection feature turned off.
|
||||
|
||||
Let's say you want to watch the absolute min and max values for all the ingredients of each ``forward`` call of a given
|
||||
batch, and only do that for batches 1 and 3. Then you instantiate this class as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])
|
||||
|
||||
And now full batches 1 and 3 will be traced using the same format as the underflow/overflow detector does.
|
||||
|
||||
Batches are 0-indexed.
|
||||
|
||||
This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward
|
||||
right to that area. Here is a sample truncated output for such a configuration:
|
||||
|
||||
.. code-block::
|
||||
|
||||
*** Starting batch number=1 ***
|
||||
abs min abs max metadata
|
||||
shared Embedding
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 2.47e+04 input[0]
|
||||
5.36e-05 7.92e+02 output
|
||||
[...]
|
||||
decoder.dropout Dropout
|
||||
1.60e-07 2.27e+01 input[0]
|
||||
0.00e+00 2.52e+01 output
|
||||
decoder T5Stack
|
||||
not a tensor output
|
||||
lm_head Linear
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 1.11e+00 input[0]
|
||||
6.06e-02 8.39e+01 output
|
||||
T5ForConditionalGeneration
|
||||
not a tensor output
|
||||
|
||||
*** Starting batch number=3 ***
|
||||
abs min abs max metadata
|
||||
shared Embedding
|
||||
1.01e-06 7.92e+02 weight
|
||||
0.00e+00 2.78e+04 input[0]
|
||||
5.36e-05 7.92e+02 output
|
||||
[...]
|
||||
|
||||
Here you will get a huge number of frames dumped - as many as there were forward calls in your model - so it may or
may not be what you want, but sometimes it is easier to use for debugging than a normal debugger. For example, if a
problem starts happening at batch number 150, you can dump traces for batches 149 and 150 and compare where the
numbers started to diverge.
|
||||
|
||||
You can also specify the batch number after which to stop the training, with:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)
|
||||
62
docs/source/fast_tokenizers.rst
Normal file
@ -0,0 +1,62 @@
|
||||
Using tokenizers from 🤗 Tokenizers
|
||||
=======================================================================================================================
|
||||
|
||||
The :class:`~transformers.PreTrainedTokenizerFast` depends on the `tokenizers
|
||||
<https://huggingface.co/docs/tokenizers>`__ library. The tokenizers obtained from the 🤗 Tokenizers library can be
|
||||
loaded very simply into 🤗 Transformers.
|
||||
|
||||
Before getting into the specifics, let's start by creating a dummy tokenizer in a few lines:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from tokenizers import Tokenizer
|
||||
>>> from tokenizers.models import BPE
|
||||
>>> from tokenizers.trainers import BpeTrainer
|
||||
>>> from tokenizers.pre_tokenizers import Whitespace
|
||||
|
||||
>>> tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
|
||||
>>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
|
||||
|
||||
>>> tokenizer.pre_tokenizer = Whitespace()
|
||||
>>> files = [...]
|
||||
>>> tokenizer.train(files, trainer)
|
||||
|
||||
We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to
|
||||
a JSON file for future re-use.
|
||||
|
||||
Loading directly from the tokenizer object
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Let's see how to leverage this tokenizer object in the 🤗 Transformers library. The
|
||||
:class:`~transformers.PreTrainedTokenizerFast` class allows for easy instantiation, by accepting the instantiated
|
||||
`tokenizer` object as an argument:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import PreTrainedTokenizerFast
|
||||
|
||||
>>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
|
||||
|
||||
This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to :doc:`the tokenizer
|
||||
page <main_classes/tokenizer>` for more information.
|
||||
|
||||
Loading from a JSON file
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In order to load a tokenizer from a JSON file, let's start by saving our tokenizer:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> tokenizer.save("tokenizer.json")
|
||||
|
||||
The path to which we saved this file can be passed to the :class:`~transformers.PreTrainedTokenizerFast` initialization
|
||||
method using the :obj:`tokenizer_file` parameter:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import PreTrainedTokenizerFast
|
||||
|
||||
>>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
|
||||
|
||||
This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to :doc:`the tokenizer
|
||||
page <main_classes/tokenizer>` for more information.
|
||||
@ -21,6 +21,7 @@ General terms
|
||||
- CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the
|
||||
next word. It's usually done by reading the whole sentence but using a mask inside the model to hide the future
|
||||
tokens at a certain timestep.
|
||||
- deep learning: machine learning algorithms which use neural networks with several layers.
|
||||
- MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done
|
||||
by masking some tokens randomly, and has to predict the original text.
|
||||
- multimodal: a task that combines texts with another kind of input (for instance images).
|
||||
@ -33,10 +34,12 @@ General terms
|
||||
involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or
|
||||
masking some words and trying to predict them (see MLM).
|
||||
- RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts.
|
||||
- self-attention: each element of the input finds out which other elements of the input it should attend to.
|
||||
- seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or
|
||||
summarization models (such as :doc:`Bart </model_doc/bart>` or :doc:`T5 </model_doc/t5>`).
|
||||
- token: a part of a sentence, usually a word, but can also be a subword (non-common words are often split into subwords)
|
||||
or a punctuation symbol.
|
||||
- transformer: a self-attention-based deep learning model architecture.
|
||||
|
||||
Model inputs
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
@ -179,7 +182,7 @@ such:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]
|
||||
|
||||
We can use our tokenizer to automatically generate such a sentence by passing the two sequences to ``tokenizer`` as two
|
||||
arguments (and not a list, like before) like this:
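For illustration, here is a minimal sketch (the ``bert-base-cased`` checkpoint is just an example choice; any
BERT-like tokenizer behaves the same way):

.. code-block::

    >>> from transformers import BertTokenizer
    >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
    >>> encoded = tokenizer("HuggingFace is based in NYC", "Where is HuggingFace based?")
    >>> print(tokenizer.decode(encoded["input_ids"]))
    [CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]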
|
||||
|
||||
Binary file not shown (image changed: 145 KiB → 691 KiB).
@ -1,12 +1,12 @@
|
||||
Transformers
|
||||
=======================================================================================================================
|
||||
|
||||
State-of-the-art Natural Language Processing for Pytorch and TensorFlow 2.0.
|
||||
State-of-the-art Natural Language Processing for Jax, Pytorch and TensorFlow
|
||||
|
||||
🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides general-purpose
|
||||
architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural
|
||||
Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between
|
||||
TensorFlow 2.0 and PyTorch.
|
||||
Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between Jax,
|
||||
PyTorch and TensorFlow.
|
||||
|
||||
This is the documentation of our repository `transformers <https://github.com/huggingface/transformers>`_.
|
||||
|
||||
@ -22,7 +22,7 @@ State-of-the-art NLP for everyone:
|
||||
- Hands-on practitioners
|
||||
- AI/ML/NLP teachers and educators
|
||||
|
||||
..
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
@ -43,11 +43,11 @@ Lower compute costs, smaller carbon footprint:
|
||||
Choose the right framework for every part of a model's lifetime:
|
||||
|
||||
- Train state-of-the-art models in 3 lines of code
|
||||
- Deep interoperability between TensorFlow 2.0 and PyTorch models
|
||||
- Move a single model between TF2.0/PyTorch frameworks at will
|
||||
- Deep interoperability between Jax, Pytorch and TensorFlow models
|
||||
- Move a single model between Jax/PyTorch/TensorFlow frameworks at will
|
||||
- Seamlessly pick the right framework for training, evaluation, production
|
||||
|
||||
Experimental support for Flax with a few models right now, expected to grow in the coming months.
|
||||
The support for Jax is still experimental (with a few models right now); expect to see it grow in the coming months!
|
||||
|
||||
`All the model checkpoints <https://huggingface.co/models>`__ are seamlessly integrated from the huggingface.co `model
|
||||
hub <https://huggingface.co>`__ where they are uploaded directly by `users <https://huggingface.co/users>`__ and
|
||||
@ -74,8 +74,8 @@ The documentation is organized in five parts:
|
||||
- **MODELS** for the classes and functions related to each model implemented in the library.
|
||||
- **INTERNAL HELPERS** for the classes and functions we use internally.
|
||||
|
||||
The library currently contains PyTorch, Tensorflow and Flax implementations, pretrained model weights, usage scripts
|
||||
and conversion utilities for the following models:
|
||||
The library currently contains Jax, PyTorch and Tensorflow implementations, pretrained model weights, usage scripts and
|
||||
conversion utilities for the following models:
|
||||
|
||||
..
|
||||
This list is updated automatically from the README with `make fix-copies`. Do not update manually!
|
||||
@ -97,121 +97,172 @@ and conversion utilities for the following models:
|
||||
5. :doc:`BERT For Sequence Generation <model_doc/bertgeneration>` (from Google) released with the paper `Leveraging
|
||||
Pre-trained Checkpoints for Sequence Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi
|
||||
Narayan, Aliaksei Severyn.
|
||||
6. :doc:`Blenderbot <model_doc/blenderbot>` (from Facebook) released with the paper `Recipes for building an
|
||||
6. :doc:`BigBird-RoBERTa <model_doc/bigbird>` (from Google Research) released with the paper `Big Bird: Transformers
|
||||
for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua
|
||||
Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
7. :doc:`BigBird-Pegasus <model_doc/bigbird_pegasus>` (from Google Research) released with the paper `Big Bird:
|
||||
Transformers for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by Manzil Zaheer, Guru Guruganesh, Avinava
|
||||
Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
8. :doc:`Blenderbot <model_doc/blenderbot>` (from Facebook) released with the paper `Recipes for building an
|
||||
open-domain chatbot <https://arxiv.org/abs/2004.13637>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary
|
||||
Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
7. :doc:`BlenderbotSmall <model_doc/blenderbot_small>` (from Facebook) released with the paper `Recipes for building an
|
||||
9. :doc:`BlenderbotSmall <model_doc/blenderbot_small>` (from Facebook) released with the paper `Recipes for building an
|
||||
open-domain chatbot <https://arxiv.org/abs/2004.13637>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary
|
||||
Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
8. :doc:`BORT <model_doc/bort>` (from Alexa) released with the paper `Optimal Subarchitecture Extraction For BERT
|
||||
<https://arxiv.org/abs/2010.10499>`__ by Adrian de Wynter and Daniel J. Perry.
|
||||
9. :doc:`CamemBERT <model_doc/camembert>` (from Inria/Facebook/Sorbonne) released with the paper `CamemBERT: a Tasty
|
||||
French Language Model <https://arxiv.org/abs/1911.03894>`__ by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz
|
||||
Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
10. :doc:`ConvBERT <model_doc/convbert>` (from YituTech) released with the paper `ConvBERT: Improving BERT with
|
||||
10. :doc:`BORT <model_doc/bort>` (from Alexa) released with the paper `Optimal Subarchitecture Extraction For BERT
|
||||
<https://arxiv.org/abs/2010.10499>`__ by Adrian de Wynter and Daniel J. Perry.
|
||||
11. :doc:`CamemBERT <model_doc/camembert>` (from Inria/Facebook/Sorbonne) released with the paper `CamemBERT: a Tasty
|
||||
French Language Model <https://arxiv.org/abs/1911.03894>`__ by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz
|
||||
Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
12. :doc:`CLIP <model_doc/clip>` from (OpenAI) released with the paper `Learning Transferable Visual Models From
|
||||
Natural Language Supervision <https://arxiv.org/abs/2103.00020>`__ by Alec Radford, Jong Wook Kim, Chris Hallacy,
|
||||
Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen
|
||||
Krueger, Ilya Sutskever.
|
||||
13. :doc:`ConvBERT <model_doc/convbert>` (from YituTech) released with the paper `ConvBERT: Improving BERT with
|
||||
Span-based Dynamic Convolution <https://arxiv.org/abs/2008.02496>`__ by Zihang Jiang, Weihao Yu, Daquan Zhou,
|
||||
Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
11. :doc:`CTRL <model_doc/ctrl>` (from Salesforce) released with the paper `CTRL: A Conditional Transformer Language
|
||||
14. :doc:`CPM <model_doc/cpm>` (from Tsinghua University) released with the paper `CPM: A Large-scale Generative
|
||||
Chinese Pre-trained Language Model <https://arxiv.org/abs/2012.00413>`__ by Zhengyan Zhang, Xu Han, Hao Zhou, Pei
|
||||
Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng,
|
||||
Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang,
|
||||
Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
15. :doc:`CTRL <model_doc/ctrl>` (from Salesforce) released with the paper `CTRL: A Conditional Transformer Language
|
||||
Model for Controllable Generation <https://arxiv.org/abs/1909.05858>`__ by Nitish Shirish Keskar*, Bryan McCann*,
|
||||
Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
12. :doc:`DeBERTa <model_doc/deberta>` (from Microsoft Research) released with the paper `DeBERTa: Decoding-enhanced
|
||||
BERT with Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao,
|
||||
16. :doc:`DeBERTa <model_doc/deberta>` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT with
|
||||
Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu
|
||||
Chen.
|
||||
17. :doc:`DeBERTa-v2 <model_doc/deberta_v2>` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT
|
||||
with Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao,
|
||||
Weizhu Chen.
|
||||
13. :doc:`DialoGPT <model_doc/dialogpt>` (from Microsoft Research) released with the paper `DialoGPT: Large-Scale
|
||||
18. :doc:`DeiT <model_doc/deit>` (from Facebook) released with the paper `Training data-efficient image transformers &
|
||||
distillation through attention <https://arxiv.org/abs/2012.12877>`__ by Hugo Touvron, Matthieu Cord, Matthijs
|
||||
Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
19. :doc:`DialoGPT <model_doc/dialogpt>` (from Microsoft Research) released with the paper `DialoGPT: Large-Scale
|
||||
Generative Pre-training for Conversational Response Generation <https://arxiv.org/abs/1911.00536>`__ by Yizhe
|
||||
Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
14. :doc:`DistilBERT <model_doc/distilbert>` (from HuggingFace), released together with the paper `DistilBERT, a
|
||||
20. :doc:`DistilBERT <model_doc/distilbert>` (from HuggingFace), released together with the paper `DistilBERT, a
|
||||
distilled version of BERT: smaller, faster, cheaper and lighter <https://arxiv.org/abs/1910.01108>`__ by Victor
|
||||
Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into `DistilGPT2
|
||||
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__, RoBERTa into `DistilRoBERTa
|
||||
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__, Multilingual BERT into
|
||||
`DistilmBERT <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__ and a German
|
||||
version of DistilBERT.
|
||||
15. :doc:`DPR <model_doc/dpr>` (from Facebook) released with the paper `Dense Passage Retrieval for Open-Domain
|
||||
21. :doc:`DPR <model_doc/dpr>` (from Facebook) released with the paper `Dense Passage Retrieval for Open-Domain
|
||||
Question Answering <https://arxiv.org/abs/2004.04906>`__ by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick
|
||||
Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
16. :doc:`ELECTRA <model_doc/electra>` (from Google Research/Stanford University) released with the paper `ELECTRA:
|
||||
22. :doc:`ELECTRA <model_doc/electra>` (from Google Research/Stanford University) released with the paper `ELECTRA:
|
||||
Pre-training text encoders as discriminators rather than generators <https://arxiv.org/abs/2003.10555>`__ by Kevin
|
||||
Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
17. :doc:`FlauBERT <model_doc/flaubert>` (from CNRS) released with the paper `FlauBERT: Unsupervised Language Model
|
||||
23. :doc:`FlauBERT <model_doc/flaubert>` (from CNRS) released with the paper `FlauBERT: Unsupervised Language Model
|
||||
Pre-training for French <https://arxiv.org/abs/1912.05372>`__ by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne,
|
||||
Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
18. :doc:`Funnel Transformer <model_doc/funnel>` (from CMU/Google Brain) released with the paper `Funnel-Transformer:
|
||||
24. :doc:`Funnel Transformer <model_doc/funnel>` (from CMU/Google Brain) released with the paper `Funnel-Transformer:
|
||||
Filtering out Sequential Redundancy for Efficient Language Processing <https://arxiv.org/abs/2006.03236>`__ by
|
||||
Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
19. :doc:`GPT <model_doc/gpt>` (from OpenAI) released with the paper `Improving Language Understanding by Generative
|
||||
25. :doc:`GPT <model_doc/gpt>` (from OpenAI) released with the paper `Improving Language Understanding by Generative
|
||||
Pre-Training <https://blog.openai.com/language-unsupervised/>`__ by Alec Radford, Karthik Narasimhan, Tim Salimans
|
||||
and Ilya Sutskever.
|
||||
20. :doc:`GPT-2 <model_doc/gpt2>` (from OpenAI) released with the paper `Language Models are Unsupervised Multitask
|
||||
26. :doc:`GPT-2 <model_doc/gpt2>` (from OpenAI) released with the paper `Language Models are Unsupervised Multitask
|
||||
Learners <https://blog.openai.com/better-language-models/>`__ by Alec Radford*, Jeffrey Wu*, Rewon Child, David
|
||||
Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
21. :doc:`LayoutLM <model_doc/layoutlm>` (from Microsoft Research Asia) released with the paper `LayoutLM: Pre-training
|
||||
27. :doc:`GPT Neo <model_doc/gpt_neo>` (from EleutherAI) released in the repository `EleutherAI/gpt-neo
|
||||
<https://github.com/EleutherAI/gpt-neo>`__ by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
28. :doc:`I-BERT <model_doc/ibert>` (from Berkeley) released with the paper `I-BERT: Integer-only BERT Quantization
|
||||
<https://arxiv.org/abs/2101.01321>`__ by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer
|
||||
29. :doc:`LayoutLM <model_doc/layoutlm>` (from Microsoft Research Asia) released with the paper `LayoutLM: Pre-training
|
||||
of Text and Layout for Document Image Understanding <https://arxiv.org/abs/1912.13318>`__ by Yiheng Xu, Minghao Li,
|
||||
Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
|
||||
22. :doc:`LED <model_doc/led>` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer
|
||||
30. :doc:`LED <model_doc/led>` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer
|
||||
<https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
23. :doc:`Longformer <model_doc/longformer>` (from AllenAI) released with the paper `Longformer: The Long-Document
|
||||
31. :doc:`Longformer <model_doc/longformer>` (from AllenAI) released with the paper `Longformer: The Long-Document
|
||||
Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
24. :doc:`LXMERT <model_doc/lxmert>` (from UNC Chapel Hill) released with the paper `LXMERT: Learning Cross-Modality
|
||||
32. :doc:`LUKE <model_doc/luke>` (from Studio Ousia) released with the paper `LUKE: Deep Contextualized Entity
|
||||
Representations with Entity-aware Self-attention <https://arxiv.org/abs/2010.01057>`__ by Ikuya Yamada, Akari Asai,
|
||||
Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
|
||||
33. :doc:`LXMERT <model_doc/lxmert>` (from UNC Chapel Hill) released with the paper `LXMERT: Learning Cross-Modality
|
||||
Encoder Representations from Transformers for Open-Domain Question Answering <https://arxiv.org/abs/1908.07490>`__
|
||||
by Hao Tan and Mohit Bansal.
|
||||
25. :doc:`MarianMT <model_doc/marian>` Machine translation models trained using `OPUS <http://opus.nlpl.eu/>`__ data by
|
||||
34. :doc:`M2M100 <model_doc/m2m_100>` (from Facebook) released with the paper `Beyond English-Centric Multilingual
|
||||
Machine Translation <https://arxiv.org/abs/2010.11125>`__ by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi
|
||||
Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman
|
||||
Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
35. :doc:`MarianMT <model_doc/marian>` Machine translation models trained using `OPUS <http://opus.nlpl.eu/>`__ data by
|
||||
Jörg Tiedemann. The `Marian Framework <https://marian-nmt.github.io/>`__ is being developed by the Microsoft
|
||||
Translator Team.
|
||||
26. :doc:`MBart <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Denoising Pre-training for
|
||||
36. :doc:`MBart <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Denoising Pre-training for
|
||||
Neural Machine Translation <https://arxiv.org/abs/2001.08210>`__ by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li,
|
||||
Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
27. :doc:`MPNet <model_doc/mpnet>` (from Microsoft Research) released with the paper `MPNet: Masked and Permuted
|
||||
37. :doc:`MBart-50 <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Translation with Extensible
|
||||
Multilingual Pretraining and Finetuning <https://arxiv.org/abs/2008.00401>`__ by Yuqing Tang, Chau Tran, Xian Li,
|
||||
Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
38. :doc:`Megatron-BERT <model_doc/megatron_bert>` (from NVIDIA) released with the paper `Megatron-LM: Training
|
||||
Multi-Billion Parameter Language Models Using Model Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad
|
||||
Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
39. :doc:`Megatron-GPT2 <model_doc/megatron_gpt2>` (from NVIDIA) released with the paper `Megatron-LM: Training
|
||||
Multi-Billion Parameter Language Models Using Model Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad
|
||||
Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
40. :doc:`MPNet <model_doc/mpnet>` (from Microsoft Research) released with the paper `MPNet: Masked and Permuted
|
||||
Pre-training for Language Understanding <https://arxiv.org/abs/2004.09297>`__ by Kaitao Song, Xu Tan, Tao Qin,
|
||||
Jianfeng Lu, Tie-Yan Liu.
|
||||
28. :doc:`MT5 <model_doc/mt5>` (from Google AI) released with the paper `mT5: A massively multilingual pre-trained
|
||||
41. :doc:`MT5 <model_doc/mt5>` (from Google AI) released with the paper `mT5: A massively multilingual pre-trained
|
||||
text-to-text transformer <https://arxiv.org/abs/2010.11934>`__ by Linting Xue, Noah Constant, Adam Roberts, Mihir
|
||||
Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
|
||||
29. :doc:`Pegasus <model_doc/pegasus>` (from Google) released with the paper `PEGASUS: Pre-training with Extracted
|
||||
42. :doc:`Pegasus <model_doc/pegasus>` (from Google) released with the paper `PEGASUS: Pre-training with Extracted
|
||||
Gap-sentences for Abstractive Summarization <https://arxiv.org/abs/1912.08777>`__ by Jingqing Zhang, Yao Zhao,
|
||||
Mohammad Saleh and Peter J. Liu.
|
||||
30. :doc:`ProphetNet <model_doc/prophetnet>` (from Microsoft Research) released with the paper `ProphetNet: Predicting
|
||||
43. :doc:`ProphetNet <model_doc/prophetnet>` (from Microsoft Research) released with the paper `ProphetNet: Predicting
|
||||
Future N-gram for Sequence-to-Sequence Pre-training <https://arxiv.org/abs/2001.04063>`__ by Yu Yan, Weizhen Qi,
|
||||
Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
31. :doc:`Reformer <model_doc/reformer>` (from Google Research) released with the paper `Reformer: The Efficient
|
||||
44. :doc:`Reformer <model_doc/reformer>` (from Google Research) released with the paper `Reformer: The Efficient
|
||||
Transformer <https://arxiv.org/abs/2001.04451>`__ by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
32. :doc:`RoBERTa <model_doc/roberta>` (from Facebook), released together with the paper a `Robustly Optimized BERT
|
||||
45. :doc:`RoBERTa <model_doc/roberta>` (from Facebook), released together with the paper a `Robustly Optimized BERT
|
||||
Pretraining Approach <https://arxiv.org/abs/1907.11692>`__ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar
|
||||
Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
|
||||
33. :doc:`SqueezeBert <model_doc/squeezebert>` released with the paper `SqueezeBERT: What can computer vision teach NLP
|
||||
46. :doc:`SpeechToTextTransformer <model_doc/speech_to_text>` (from Facebook), released together with the paper
|
||||
`fairseq S2T: Fast Speech-to-Text Modeling with fairseq <https://arxiv.org/abs/2010.05171>`__ by Changhan Wang, Yun
|
||||
Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
|
||||
47. :doc:`SqueezeBert <model_doc/squeezebert>` released with the paper `SqueezeBERT: What can computer vision teach NLP
|
||||
about efficient neural networks? <https://arxiv.org/abs/2006.11316>`__ by Forrest N. Iandola, Albert E. Shaw, Ravi
|
||||
Krishna, and Kurt W. Keutzer.
|
||||
34. :doc:`T5 <model_doc/t5>` (from Google AI) released with the paper `Exploring the Limits of Transfer Learning with a
|
||||
48. :doc:`T5 <model_doc/t5>` (from Google AI) released with the paper `Exploring the Limits of Transfer Learning with a
|
||||
Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel and Noam Shazeer and Adam
|
||||
Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
35. :doc:`TAPAS <model_doc/tapas>` (from Google AI) released with the paper `TAPAS: Weakly Supervised Table Parsing via
|
||||
49. :doc:`TAPAS <model_doc/tapas>` (from Google AI) released with the paper `TAPAS: Weakly Supervised Table Parsing via
|
||||
Pre-training <https://arxiv.org/abs/2004.02349>`__ by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller,
|
||||
Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
36. :doc:`Transformer-XL <model_doc/transformerxl>` (from Google/CMU) released with the paper `Transformer-XL:
|
||||
50. :doc:`Transformer-XL <model_doc/transformerxl>` (from Google/CMU) released with the paper `Transformer-XL:
|
||||
Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`__ by Zihang Dai*,
|
||||
Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
37. :doc:`Wav2Vec2 <model_doc/wav2vec2>` (from Facebook AI) released with the paper `wav2vec 2.0: A Framework for
|
||||
51. :doc:`Vision Transformer (ViT) <model_doc/vit>` (from Google AI) released with the paper `An Image is Worth 16x16
|
||||
Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`__ by Alexey Dosovitskiy,
|
||||
Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias
|
||||
Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
52. :doc:`Wav2Vec2 <model_doc/wav2vec2>` (from Facebook AI) released with the paper `wav2vec 2.0: A Framework for
|
||||
Self-Supervised Learning of Speech Representations <https://arxiv.org/abs/2006.11477>`__ by Alexei Baevski, Henry
|
||||
Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
38. :doc:`XLM <model_doc/xlm>` (from Facebook) released together with the paper `Cross-lingual Language Model
|
||||
53. :doc:`XLM <model_doc/xlm>` (from Facebook) released together with the paper `Cross-lingual Language Model
|
||||
Pretraining <https://arxiv.org/abs/1901.07291>`__ by Guillaume Lample and Alexis Conneau.
|
||||
39. :doc:`XLM-ProphetNet <model_doc/xlmprophetnet>` (from Microsoft Research) released with the paper `ProphetNet:
|
||||
54. :doc:`XLM-ProphetNet <model_doc/xlmprophetnet>` (from Microsoft Research) released with the paper `ProphetNet:
|
||||
Predicting Future N-gram for Sequence-to-Sequence Pre-training <https://arxiv.org/abs/2001.04063>`__ by Yu Yan,
|
||||
Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
40. :doc:`XLM-RoBERTa <model_doc/xlmroberta>` (from Facebook AI), released together with the paper `Unsupervised
|
||||
55. :doc:`XLM-RoBERTa <model_doc/xlmroberta>` (from Facebook AI), released together with the paper `Unsupervised
|
||||
Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`__ by Alexis Conneau*, Kartikay
|
||||
Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke
|
||||
Zettlemoyer and Veselin Stoyanov.
|
||||
41. :doc:`XLNet <model_doc/xlnet>` (from Google/CMU) released with the paper `XLNet: Generalized Autoregressive
|
||||
56. :doc:`XLNet <model_doc/xlnet>` (from Google/CMU) released with the paper `XLNet: Generalized Autoregressive
|
||||
Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`__ by Zhilin Yang*, Zihang Dai*, Yiming
|
||||
Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||
57. :doc:`XLSR-Wav2Vec2 <model_doc/xlsr_wav2vec2>` (from Facebook AI) released with the paper `Unsupervised
|
||||
Cross-Lingual Representation Learning For Speech Recognition <https://arxiv.org/abs/2006.13979>`__ by Alexis
|
||||
Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||
|
||||
|
||||
.. _bigtable:
|
||||
|
||||
The table below represents the current support in the library for each of those models, whether they have a Python
|
||||
tokenizer (called "slow"). A "fast" tokenizer backed by the 🤗 Tokenizers library, whether they have support in PyTorch,
|
||||
TensorFlow and/or Flax.
|
||||
tokenizer (called "slow"). A "fast" tokenizer backed by the 🤗 Tokenizers library, whether they have support in Jax (via
|
||||
Flax), PyTorch, and/or TensorFlow.
|
||||
|
||||
..
|
||||
This table is updated automatically from the auto modules with `make fix-copies`. Do not update manually!
|
||||
@ -229,10 +280,16 @@ TensorFlow and/or Flax.
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Bert Generation | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| BigBird | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| BigBirdPegasus | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Blenderbot | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| BlenderbotSmall | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| CLIP | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| CamemBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
@ -241,11 +298,15 @@ TensorFlow and/or Flax.
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| DPR | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| DeBERTa | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| DeBERTa | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| DeBERTa-v2 | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| DeiT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| DistilBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Encoder decoder | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
@ -255,18 +316,28 @@ TensorFlow and/or Flax.
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Funnel Transformer | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| GPT Neo | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| I-BERT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| LED | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| LUKE | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| LXMERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| LayoutLM | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| LayoutLM | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Longformer | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| MPNet | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Marian | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| MegatronBert | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| MobileBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| OpenAI GPT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
@ -277,7 +348,7 @@ TensorFlow and/or Flax.
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| RAG | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| RAG | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Reformer | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
@ -285,6 +356,8 @@ TensorFlow and/or Flax.
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Speech2Text | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| SqueezeBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
@ -293,6 +366,8 @@ TensorFlow and/or Flax.
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| ViT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| Wav2Vec2 | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
|
||||
| XLM | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
@ -335,14 +410,18 @@ TensorFlow and/or Flax.
|
||||
|
||||
pretrained_models
|
||||
examples
|
||||
troubleshooting
|
||||
custom_datasets
|
||||
notebooks
|
||||
sagemaker
|
||||
community
|
||||
converting_tensorflow_models
|
||||
migration
|
||||
contributing
|
||||
add_new_model
|
||||
fast_tokenizers
|
||||
testing
|
||||
debugging
|
||||
serialization
|
||||
|
||||
.. toctree::
|
||||
@ -359,6 +438,7 @@ TensorFlow and/or Flax.
|
||||
|
||||
main_classes/callback
|
||||
main_classes/configuration
|
||||
main_classes/data_collator
|
||||
main_classes/logging
|
||||
main_classes/model
|
||||
main_classes/optimizer_schedules
|
||||
@ -367,6 +447,7 @@ TensorFlow and/or Flax.
|
||||
main_classes/processors
|
||||
main_classes/tokenizer
|
||||
main_classes/trainer
|
||||
main_classes/feature_extractor
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
@ -379,13 +460,20 @@ TensorFlow and/or Flax.
|
||||
model_doc/bert
|
||||
model_doc/bertweet
|
||||
model_doc/bertgeneration
|
||||
model_doc/bert_japanese
|
||||
model_doc/bigbird
|
||||
model_doc/bigbird_pegasus
|
||||
model_doc/blenderbot
|
||||
model_doc/blenderbot_small
|
||||
model_doc/bort
|
||||
model_doc/camembert
|
||||
model_doc/clip
|
||||
model_doc/convbert
|
||||
model_doc/cpm
|
||||
model_doc/ctrl
|
||||
model_doc/deberta
|
||||
model_doc/deberta_v2
|
||||
model_doc/deit
|
||||
model_doc/dialogpt
|
||||
model_doc/distilbert
|
||||
model_doc/dpr
|
||||
@ -395,17 +483,23 @@ TensorFlow and/or Flax.
|
||||
model_doc/fsmt
|
||||
model_doc/funnel
|
||||
model_doc/herbert
|
||||
model_doc/ibert
|
||||
model_doc/layoutlm
|
||||
model_doc/led
|
||||
model_doc/longformer
|
||||
model_doc/luke
|
||||
model_doc/lxmert
|
||||
model_doc/marian
|
||||
model_doc/m2m_100
|
||||
model_doc/mbart
|
||||
model_doc/megatron_bert
|
||||
model_doc/megatron_gpt2
|
||||
model_doc/mobilebert
|
||||
model_doc/mpnet
|
||||
model_doc/mt5
|
||||
model_doc/gpt
|
||||
model_doc/gpt2
|
||||
model_doc/gpt_neo
|
||||
model_doc/pegasus
|
||||
model_doc/phobert
|
||||
model_doc/prophetnet
|
||||
@ -413,15 +507,18 @@ TensorFlow and/or Flax.
|
||||
model_doc/reformer
|
||||
model_doc/retribert
|
||||
model_doc/roberta
|
||||
model_doc/speech_to_text
|
||||
model_doc/squeezebert
|
||||
model_doc/t5
|
||||
model_doc/tapas
|
||||
model_doc/transformerxl
|
||||
model_doc/vit
|
||||
model_doc/wav2vec2
|
||||
model_doc/xlm
|
||||
model_doc/xlmprophetnet
|
||||
model_doc/xlmroberta
|
||||
model_doc/xlnet
|
||||
model_doc/xlsr_wav2vec2
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
@ -432,3 +529,4 @@ TensorFlow and/or Flax.
|
||||
internal/tokenization_utils
|
||||
internal/trainer_utils
|
||||
internal/generation_utils
|
||||
internal/file_utils
|
||||
|
||||
@ -149,11 +149,30 @@ So if you don't have any specific environment variable set, the cache directory
|
||||
(``PYTORCH_TRANSFORMERS_CACHE`` or ``PYTORCH_PRETRAINED_BERT_CACHE``), those will be used if there is no shell
|
||||
environment variable for ``TRANSFORMERS_CACHE``.
|
||||
|
||||
### Note on model downloads (Continuous Integration or large-scale deployments)
|
||||
### Offline mode
|
||||
|
||||
It's possible to run 🤗 Transformers in a firewalled or a no-network environment.
|
||||
|
||||
Setting the environment variable `TRANSFORMERS_OFFLINE=1` tells 🤗 Transformers to use local files only and not to try to look anything up online.

You will most likely want to couple this with `HF_DATASETS_OFFLINE=1`, which does the same for 🤗 Datasets if you're using it.

Here is an example of how this can be used on a filesystem that is shared between an instance with normal network access and one that is firewalled from the external world.
|
||||
|
||||
On the instance with normal network access, run your program, which will download and cache models (and, optionally, datasets if you use 🤗 Datasets). For example:
|
||||
|
||||
```
|
||||
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
|
||||
```
|
||||
|
||||
and then, using the same shared filesystem, you can run the same program on a firewalled instance:
|
||||
```
|
||||
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
|
||||
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
|
||||
```
|
||||
and it should succeed without hanging while waiting for a network connection to time out.
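If you prefer to enforce this per call rather than through environment variables, the `from_pretrained` methods also accept a `local_files_only=True` argument. A minimal sketch, assuming `t5-small` has already been downloaded into the cache on this shared filesystem:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# With local_files_only=True, from_pretrained resolves everything from the local
# cache and raises an error instead of attempting any network request.
tokenizer = AutoTokenizer.from_pretrained("t5-small", local_files_only=True)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", local_files_only=True)
```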
|
||||
|
||||
|
||||
If you expect to be downloading large volumes of models (more than 1,000) from our hosted bucket (for instance through
|
||||
your CI setup, or a large-scale production deployment), please cache the model files on your end. It will be way
|
||||
faster, and cheaper. Feel free to contact us privately if you need any help.
|
||||
|
||||
## Do you want to run a Transformer model on a mobile device?
|
||||
|
||||
|
||||
54
docs/source/internal/file_utils.rst
Normal file
@ -0,0 +1,54 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
General Utilities
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
This page lists all of Transformers' general utility functions that are found in the file ``file_utils.py``.
|
||||
|
||||
Most of those are only useful if you are studying the general code in the library.
|
||||
|
||||
|
||||
Enums and namedtuples
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.file_utils.ExplicitEnum
|
||||
|
||||
.. autoclass:: transformers.file_utils.PaddingStrategy
|
||||
|
||||
.. autoclass:: transformers.file_utils.TensorType
|
||||
|
||||
|
||||
Special Decorators
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: transformers.file_utils.add_start_docstrings
|
||||
|
||||
.. autofunction:: transformers.file_utils.add_start_docstrings_to_model_forward
|
||||
|
||||
.. autofunction:: transformers.file_utils.add_end_docstrings
|
||||
|
||||
.. autofunction:: transformers.file_utils.add_code_sample_docstrings
|
||||
|
||||
.. autofunction:: transformers.file_utils.replace_return_docstrings
|
||||
|
||||
|
||||
Special Properties
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.file_utils.cached_property
|
||||
|
||||
|
||||
Other Utilities
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.file_utils._BaseLazyModule
|
||||
@ -151,6 +151,33 @@ generation.
|
||||
.. autoclass:: transformers.HammingDiversityLogitsProcessor
|
||||
:members: __call__
|
||||
|
||||
.. autoclass:: transformers.ForcedBOSTokenLogitsProcessor
|
||||
:members: __call__
|
||||
|
||||
.. autoclass:: transformers.ForcedEOSTokenLogitsProcessor
|
||||
:members: __call__
|
||||
|
||||
.. autoclass:: transformers.InfNanRemoveLogitsProcessor
|
||||
:members: __call__
|
||||
|
||||
|
||||
StoppingCriteria
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
A :class:`~transformers.StoppingCriteria` can be used to change when to stop generation (other than EOS token).
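As a small illustration (a sketch with dummy inputs, independent of any particular model), a stopping criterion is
just a callable on the generated ``input_ids``:

.. code-block:: python

    import torch

    from transformers import MaxLengthCriteria, StoppingCriteriaList

    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])

    # pretend we have generated 8 tokens so far
    input_ids = torch.ones((1, 8), dtype=torch.long)
    print(criteria(input_ids, scores=None))  # False - fewer than 10 tokens generated

    # once the sequence reaches max_length, the criteria list returns True
    input_ids = torch.ones((1, 10), dtype=torch.long)
    print(criteria(input_ids, scores=None))  # True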
|
||||
|
||||
.. autoclass:: transformers.StoppingCriteria
|
||||
:members: __call__
|
||||
|
||||
.. autoclass:: transformers.StoppingCriteriaList
|
||||
:members: __call__
|
||||
|
||||
.. autoclass:: transformers.MaxLengthCriteria
|
||||
:members: __call__
|
||||
|
||||
.. autoclass:: transformers.MaxTimeCriteria
|
||||
:members: __call__
|
||||
|
||||
BeamSearch
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
||||
@ -47,6 +47,4 @@ Data format
|
||||
Utilities
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: transformers.pipelines.get_framework
|
||||
|
||||
.. autoclass:: transformers.pipelines.PipelineException
|
||||
|
||||
@ -38,12 +38,6 @@ SpecialTokensMixin
|
||||
Enums and namedtuples
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.tokenization_utils_base.ExplicitEnum
|
||||
|
||||
.. autoclass:: transformers.tokenization_utils_base.PaddingStrategy
|
||||
|
||||
.. autoclass:: transformers.tokenization_utils_base.TensorType
|
||||
|
||||
.. autoclass:: transformers.tokenization_utils_base.TruncationStrategy
|
||||
|
||||
.. autoclass:: transformers.tokenization_utils_base.CharSpan
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
..
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
@ -22,7 +22,7 @@ Utilities
|
||||
|
||||
.. autoclass:: transformers.EvalPrediction
|
||||
|
||||
.. autoclass:: transformers.EvaluationStrategy
|
||||
.. autoclass:: transformers.IntervalStrategy
|
||||
|
||||
.. autofunction:: transformers.set_seed
|
||||
|
||||
@ -46,3 +46,9 @@ Distributed Evaluation
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.HfArgumentParser
|
||||
|
||||
|
||||
Debug Utilities
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.debug_utils.DebugUnderflowOverflow
|
||||
|
||||
@ -74,6 +74,32 @@ TrainerCallback
|
||||
.. autoclass:: transformers.TrainerCallback
|
||||
:members:
|
||||
|
||||
Here is an example of how to register a custom callback with the PyTorch :class:`~transformers.Trainer`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class MyCallback(TrainerCallback):
|
||||
"A callback that prints a message at the beginning of training"
|
||||
|
||||
def on_train_begin(self, args, state, control, **kwargs):
|
||||
print("Starting training")
|
||||
|
||||
trainer = Trainer(
|
||||
model,
|
||||
args,
|
||||
train_dataset=train_dataset,
|
||||
eval_dataset=eval_dataset,
|
||||
callbacks=[MyCallback] # We can either pass the callback class this way or an instance of it (MyCallback())
|
||||
)
|
||||
|
||||
Another way to register a callback is to call ``trainer.add_callback()`` as follows:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
trainer = Trainer(...)
|
||||
trainer.add_callback(MyCallback)
|
||||
# Alternatively, we can pass an instance of the callback class
|
||||
trainer.add_callback(MyCallback())
|
||||
|
||||
TrainerState
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
71
docs/source/main_classes/data_collator.rst
Normal file
@ -0,0 +1,71 @@
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
Data Collator
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of
|
||||
the same type as the elements of :obj:`train_dataset` or :obj:`eval_dataset`.
|
||||
|
||||
To be able to build batches, data collators may apply some processing (like padding). Some of them (like
|
||||
:class:`~transformers.DataCollatorForLanguageModeling`) also apply some random data augmentation (like random masking)
|
||||
on the formed batch.
|
||||
|
||||
Examples of use can be found in the :doc:`example scripts <../examples>` or :doc:`example notebooks <../notebooks>`.
|
||||
|
||||
|
||||
Default data collator
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: transformers.data.data_collator.default_data_collator
|
||||
|
||||
|
||||
DataCollatorWithPadding
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.data.data_collator.DataCollatorWithPadding
|
||||
:members:
|
||||
|
||||
|
||||
DataCollatorForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.data.data_collator.DataCollatorForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
DataCollatorForSeq2Seq
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.data.data_collator.DataCollatorForSeq2Seq
|
||||
:members:
|
||||
|
||||
|
||||
DataCollatorForLanguageModeling
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.data.data_collator.DataCollatorForLanguageModeling
|
||||
:members: mask_tokens
|
||||
|
||||
|
||||
DataCollatorForWholeWordMask
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.data.data_collator.DataCollatorForWholeWordMask
|
||||
:members: mask_tokens
|
||||
|
||||
|
||||
DataCollatorForPermutationLanguageModeling
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.data.data_collator.DataCollatorForPermutationLanguageModeling
|
||||
:members: mask_tokens
|
||||
48
docs/source/main_classes/feature_extractor.rst
Normal file
@ -0,0 +1,48 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
|
||||
Feature Extractor
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
A feature extractor is in charge of preparing input features for a multi-modal model. This includes feature extraction
|
||||
from sequences, *e.g.*, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images
|
||||
*e.g.* cropping image image files, but also padding, normalization, and conversion to Numpy, PyTorch, and TensorFlow
|
||||
tensors.
|
||||
|
||||
|
||||
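As a short illustration, here is a minimal sketch (assuming the ``facebook/wav2vec2-base-960h`` checkpoint, with dummy waveforms in place of real audio) of a sequence feature extractor padding raw speech into a batch of tensors:

.. code-block::

    >>> import numpy as np
    >>> from transformers import Wav2Vec2FeatureExtractor

    >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

    >>> # two dummy mono waveforms of different lengths, sampled at 16 kHz
    >>> speech = [np.zeros(16000, dtype=np.float32), np.zeros(8000, dtype=np.float32)]
    >>> # the shorter waveform is padded; the result is a BatchFeature holding the "input_values" tensor
    >>> inputs = feature_extractor(speech, sampling_rate=16000, padding=True, return_tensors="pt")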
FeatureExtractionMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.feature_extraction_utils.FeatureExtractionMixin
    :members: from_pretrained, save_pretrained


SequenceFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.SequenceFeatureExtractor
    :members: pad


BatchFeature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BatchFeature
    :members:


ImageFeatureExtractionMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.image_utils.ImageFeatureExtractionMixin
    :members:
@@ -65,6 +65,10 @@ Other functions

.. autofunction:: transformers.logging.get_logger

.. autofunction:: transformers.logging.enable_default_handler

.. autofunction:: transformers.logging.disable_default_handler

.. autofunction:: transformers.logging.enable_explicit_format

.. autofunction:: transformers.logging.reset_format

@@ -73,3 +73,10 @@ Generation

.. autoclass:: transformers.generation_tf_utils.TFGenerationMixin
    :members:


Pushing to the Hub
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils.PushToHubMixin
    :members:
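For orientation, a minimal sketch of what pushing with this mixin looks like (assuming you are already authenticated with ``huggingface-cli login``; the repository name ``my-finetuned-bert`` is arbitrary):

.. code-block::

    >>> from transformers import AutoModel

    >>> model = AutoModel.from_pretrained("bert-base-cased")
    >>> # creates (or updates) a repository under your namespace and uploads the weights and configuration
    >>> model.push_to_hub("my-finetuned-bert")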
@@ -13,8 +13,8 @@

Model outputs
-----------------------------------------------------------------------------------------------------------------------

PyTorch models have outputs that are instances of subclasses of :class:`~transformers.file_utils.ModelOutput`. Those
are data structures containing all the information returned by the model, but that can also be used as tuples or
All models have outputs that are instances of subclasses of :class:`~transformers.file_utils.ModelOutput`. Those are
data structures containing all the information returned by the model, but that can also be used as tuples or
dictionaries.

Let's see how this looks in an example:
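A minimal sketch, assuming the ``bert-base-uncased`` checkpoint: the same field of a model output can be read as an attribute, as a dictionary key, or by tuple index.

.. code-block::

    >>> import torch
    >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

    >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> outputs = model(**inputs, labels=torch.tensor([1]))

    >>> # attribute access, dictionary access and tuple indexing all return the same tensor
    >>> assert torch.equal(outputs.logits, outputs["logits"])
    >>> assert torch.equal(outputs.logits, outputs[1])  # index 1 because the loss comes first when labels are passed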
@@ -60,7 +60,7 @@ ModelOutput
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils.ModelOutput
    :members:
    :members: to_tuple


BaseModelOutput
@@ -23,6 +23,7 @@ There are two categories of pipeline abstractions to be aware about:

- The :func:`~transformers.pipeline` which is the most powerful object encapsulating all other pipelines.
- The other task-specific pipelines:

    - :class:`~transformers.AutomaticSpeechRecognitionPipeline`
    - :class:`~transformers.ConversationalPipeline`
    - :class:`~transformers.FeatureExtractionPipeline`
    - :class:`~transformers.FillMaskPipeline`
@@ -35,6 +36,7 @@ There are two categories of pipeline abstractions to be aware about:
    - :class:`~transformers.ZeroShotClassificationPipeline`
    - :class:`~transformers.Text2TextGenerationPipeline`
    - :class:`~transformers.TableQuestionAnsweringPipeline`
    - :class:`~transformers.ImageClassificationPipeline`

The pipeline abstraction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
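As a quick illustration of the abstraction, here is a minimal sketch (assuming the default sentiment-analysis checkpoint can be downloaded) of building and calling a pipeline from just a task name:

.. code-block::

    >>> from transformers import pipeline

    >>> # picks a sensible default model and tokenizer for the task
    >>> classifier = pipeline("sentiment-analysis")
    >>> result = classifier("We are very happy to show you the 🤗 Transformers library.")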
@@ -48,6 +50,13 @@ pipeline but requires an additional argument which is the `task`.

The task specific pipelines
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

AutomaticSpeechRecognitionPipeline
=======================================================================================================================

.. autoclass:: transformers.AutomaticSpeechRecognitionPipeline
    :special-members: __call__
    :members:

ConversationalPipeline
=======================================================================================================================

@@ -71,6 +80,13 @@ FillMaskPipeline
    :special-members: __call__
    :members:

ImageClassificationPipeline
=======================================================================================================================

.. autoclass:: transformers.ImageClassificationPipeline
    :special-members: __call__
    :members:

NerPipeline
=======================================================================================================================
@@ -68,8 +68,8 @@ Additionally, the following method can be used to load values from a data file a
Example usage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

An example using these processors is given in the `run_glue.py
<https://github.com/huggingface/pytorch-transformers/blob/master/examples/text-classification/run_glue.py>`__ script.
An example using these processors is given in the :prefix_link:`run_glue.py
<examples/legacy/text-classification/run_glue.py>` script.


XNLI
@@ -89,8 +89,8 @@ This library hosts the processor to load the XNLI data:

Please note that since the gold labels are available on the test set, evaluation is performed on the test set.

An example using these processors is given in the `run_xnli.py
<https://github.com/huggingface/pytorch-transformers/blob/master/examples/text-classification/run_xnli.py>`__ script.
An example using these processors is given in the :prefix_link:`run_xnli.py
<examples/legacy/text-classification/run_xnli.py>` script.


SQuAD
@@ -169,4 +169,4 @@ Using `tensorflow_datasets` is as easy as using a data file:

Another example using these processors is given in the :prefix_link:`run_squad.py
<examples/question-answering/run_squad.py>` script.
<examples/legacy/question-answering/run_squad.py>` script.
@@ -54,19 +54,24 @@ PreTrainedTokenizer

.. autoclass:: transformers.PreTrainedTokenizer
    :special-members: __call__
    :members:

    .. automethod:: encode
    :members: batch_decode, convert_ids_to_tokens, convert_tokens_to_ids, convert_tokens_to_string, decode, encode,
        get_added_vocab, get_special_tokens_mask, num_special_tokens_to_add, prepare_for_tokenization, tokenize,
        vocab_size


PreTrainedTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :class:`~transformers.PreTrainedTokenizerFast` depends on the `tokenizers
<https://huggingface.co/docs/tokenizers>`__ library. The tokenizers obtained from the 🤗 tokenizers library can be
loaded very simply into 🤗 transformers. Take a look at the :doc:`Using tokenizers from 🤗 tokenizers
<../fast_tokenizers>` page to understand how this is done.
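As a rough sketch of that flow (assuming a ``tokenizer.json`` file produced with the 🤗 tokenizers library is available on disk; the file name is hypothetical):

.. code-block::

    >>> from transformers import PreTrainedTokenizerFast

    >>> # wrap a tokenizer serialized with the 🤗 tokenizers library
    >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
    >>> encoding = fast_tokenizer("Hello world")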
.. autoclass:: transformers.PreTrainedTokenizerFast
    :special-members: __call__
    :members:

    .. automethod:: encode
    :members: batch_decode, convert_ids_to_tokens, convert_tokens_to_ids, convert_tokens_to_string, decode, encode,
        get_added_vocab, get_special_tokens_mask, num_special_tokens_to_add,
        set_truncation_and_padding, tokenize, vocab_size


BatchEncoding
File diff suppressed because it is too large.
@@ -169,8 +169,8 @@ Regarding the `TFTrainer` class:

- The `TFTrainer` method `_setup_wandb` is deprecated in favor of `setup_wandb`.
- The `TFTrainer` method `_run_model` is deprecated in favor of `run_model`.

Regarding the `TrainerArgument` class:
- The `TrainerArgument` argument `evaluate_during_training` is deprecated in favor of `evaluation_strategy`.
Regarding the `TrainingArguments` class:
- The `TrainingArguments` argument `evaluate_during_training` is deprecated in favor of `evaluation_strategy`.
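For example, a minimal sketch of the updated argument (the output directory name here is arbitrary):

.. code-block::

    >>> from transformers import TrainingArguments

    >>> # new style: evaluation_strategy replaces the deprecated evaluate_during_training flag
    >>> args = TrainingArguments(output_dir="test-trainer", evaluation_strategy="steps")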
Regarding the Transfo-XL model:
- The Transfo-XL configuration attribute `tie_weight` becomes `tie_words_embeddings`.
@@ -43,7 +43,8 @@ Tips:
  similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same
  number of (repeating) layers.

The original code can be found `here <https://github.com/google-research/ALBERT>`__.
This model was contributed by `lysandre <https://huggingface.co/lysandre>`__. The original code can be found `here
<https://github.com/google-research/ALBERT>`__.

AlbertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -44,6 +44,13 @@ AutoTokenizer
    :members:


AutoFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AutoFeatureExtractor
    :members:


AutoModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -121,6 +128,13 @@ AutoModelForTableQuestionAnswering
    :members:


AutoModelForImageClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AutoModelForImageClassification
    :members:


TFAutoModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -189,3 +203,52 @@ FlaxAutoModel

.. autoclass:: transformers.FlaxAutoModel
    :members:


FlaxAutoModelForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForPreTraining
    :members:


FlaxAutoModelForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForMaskedLM
    :members:


FlaxAutoModelForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForSequenceClassification
    :members:


FlaxAutoModelForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForQuestionAnswering
    :members:


FlaxAutoModelForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForTokenClassification
    :members:


FlaxAutoModelForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForMultipleChoice
    :members:


FlaxAutoModelForNextSentencePrediction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxAutoModelForNextSentencePrediction
    :members:
@@ -35,14 +35,15 @@ According to the abstract,
state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains
of up to 6 ROUGE.

The Authors' code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/bart>`__.
This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__. The Authors' code can be found `here
<https://github.com/pytorch/fairseq/tree/master/examples/bart>`__.


Examples
_______________________________________________________________________________________________________________________

- Examples and scripts for fine-tuning BART and other models for sequence to sequence tasks can be found in
  :prefix_link:`examples/seq2seq/ <examples/seq2seq/README.md>`.
  :prefix_link:`examples/pytorch/summarization/ <examples/pytorch/summarization/README.md>`.
- An example of how to train :class:`~transformers.BartForConditionalGeneration` with a Hugging Face :obj:`datasets`
  object can be found in this `forum discussion
  <https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904>`__.
@@ -16,7 +16,7 @@ BARThez

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BARThez model was proposed in `BARThez: a Skilled Pretrained French Sequence-to-Sequence Model`
The BARThez model was proposed in `BARThez: a Skilled Pretrained French Sequence-to-Sequence Model
<https://arxiv.org/abs/2010.12321>`__ by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct,
2020.

@@ -35,14 +35,15 @@ summarization dataset, OrangeSum, that we release with this paper. We also conti
pretrained multilingual BART on BARThez's corpus, and we show that the resulting model, which we call mBARTHez,
provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.*

The Authors' code can be found `here <https://github.com/moussaKam/BARThez>`__.
This model was contributed by `moussakam <https://huggingface.co/moussakam>`__. The Authors' code can be found `here
<https://github.com/moussaKam/BARThez>`__.


Examples
_______________________________________________________________________________________________________________________

- BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check:
  :prefix_link:`examples/seq2seq/ <examples/seq2seq/README.md>`.
  :prefix_link:`examples/pytorch/summarization/ <examples/pytorch/summarization/README.md>`.


BarthezTokenizer
@@ -42,7 +42,8 @@ Tips:

- BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is
  efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation.

The original code can be found `here <https://github.com/google-research/bert>`__.
This model was contributed by `thomwolf <https://huggingface.co/thomwolf>`__. The original code can be found `here
<https://github.com/google-research/bert>`__.

BertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -90,7 +91,7 @@ BertForPreTraining
    :members: forward


BertModelLMHeadModel
BertLMHeadModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertLMHeadModel
@@ -209,8 +210,50 @@ FlaxBertModel
    :members: __call__


FlaxBertForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForPreTraining
    :members: __call__


FlaxBertForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForMaskedLM
    :members: __call__


FlaxBertForNextSentencePrediction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForNextSentencePrediction
    :members: __call__


FlaxBertForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForSequenceClassification
    :members: __call__


FlaxBertForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForMultipleChoice
    :members: __call__


FlaxBertForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForTokenClassification
    :members: __call__


FlaxBertForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxBertForQuestionAnswering
    :members: __call__
docs/source/model_doc/bert_japanese.rst (new file, 80 lines)
@@ -0,0 +1,80 @@

..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BertJapanese
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BERT models trained on Japanese text.

There are models with two different tokenization methods:

- Tokenize with MeCab and WordPiece. This requires some extra dependencies, `fugashi
  <https://github.com/polm/fugashi>`__, which is a wrapper around `MeCab <https://taku910.github.io/mecab/>`__.
- Tokenize into characters.

To use `MecabTokenizer`, you should ``pip install transformers["ja"]`` (or ``pip install -e .["ja"]`` if you install
from source) to install dependencies.

See `details on cl-tohoku repository <https://github.com/cl-tohoku/bert-japanese>`__.

Example of using a model with MeCab and WordPiece tokenization:

.. code-block::

    >>> import torch
    >>> from transformers import AutoModel, AutoTokenizer

    >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese")
    >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")

    >>> ## Input Japanese Text
    >>> line = "吾輩は猫である。"

    >>> inputs = tokenizer(line, return_tensors="pt")

    >>> print(tokenizer.decode(inputs['input_ids'][0]))
    [CLS] 吾輩 は 猫 で ある 。 [SEP]

    >>> outputs = bertjapanese(**inputs)

Example of using a model with Character tokenization:

.. code-block::

    >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char")
    >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char")

    >>> ## Input Japanese Text
    >>> line = "吾輩は猫である。"

    >>> inputs = tokenizer(line, return_tensors="pt")

    >>> print(tokenizer.decode(inputs['input_ids'][0]))
    [CLS] 吾 輩 は 猫 で あ る 。 [SEP]

    >>> outputs = bertjapanese(**inputs)

Tips:

- This implementation is the same as BERT, except for the tokenization method. Refer to the :doc:`documentation of BERT
  <bert>` for more usage examples.

This model was contributed by `cl-tohoku <https://huggingface.co/cl-tohoku>`__.

BertJapaneseTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertJapaneseTokenizer
    :members:
@@ -38,22 +38,22 @@ Usage:

.. code-block::

    # leverage checkpoints for Bert2Bert model...
    # use BERT's cls token as BOS token and sep token as EOS token
    encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
    # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token
    decoder = BertGenerationDecoder.from_pretrained("bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
    bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)
    >>> # leverage checkpoints for Bert2Bert model...
    >>> # use BERT's cls token as BOS token and sep token as EOS token
    >>> encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
    >>> # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token
    >>> decoder = BertGenerationDecoder.from_pretrained("bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
    >>> bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)

    # create tokenizer...
    tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
    >>> # create tokenizer...
    >>> tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")

    input_ids = tokenizer('This is a long article to summarize', add_special_tokens=False, return_tensors="pt").input_ids
    labels = tokenizer('This is a short summary', return_tensors="pt").input_ids
    >>> input_ids = tokenizer('This is a long article to summarize', add_special_tokens=False, return_tensors="pt").input_ids
    >>> labels = tokenizer('This is a short summary', return_tensors="pt").input_ids

    # train...
    loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss
    loss.backward()
    >>> # train...
    >>> loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss
    >>> loss.backward()


- Pretrained :class:`~transformers.EncoderDecoderModel` are also directly available in the model hub, e.g.,
@@ -61,15 +61,15 @@ Usage:

.. code-block::

    # instantiate sentence fusion model
    sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse")
    tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse")
    >>> # instantiate sentence fusion model
    >>> sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse")
    >>> tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse")

    input_ids = tokenizer('This is the first sentence. This is the second sentence.', add_special_tokens=False, return_tensors="pt").input_ids
    >>> input_ids = tokenizer('This is the first sentence. This is the second sentence.', add_special_tokens=False, return_tensors="pt").input_ids

    outputs = sentence_fuser.generate(input_ids)
    >>> outputs = sentence_fuser.generate(input_ids)

    print(tokenizer.decode(outputs[0]))
    >>> print(tokenizer.decode(outputs[0]))


Tips:

@@ -79,7 +79,8 @@ Tips:
- For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input.
  Therefore, no EOS token should be added to the end of the input.

The original code can be found `here <https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder>`__.
This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__. The original code can be
found `here <https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder>`__.

BertGenerationConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -31,31 +31,31 @@ Example of use:

.. code-block::

    import torch
    from transformers import AutoModel, AutoTokenizer
    >>> import torch
    >>> from transformers import AutoModel, AutoTokenizer

    bertweet = AutoModel.from_pretrained("vinai/bertweet-base")
    >>> bertweet = AutoModel.from_pretrained("vinai/bertweet-base")

    # For transformers v4.x+:
    tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)
    >>> # For transformers v4.x+:
    >>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)

    # For transformers v3.x:
    # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")
    >>> # For transformers v3.x:
    >>> # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")

    # INPUT TWEET IS ALREADY NORMALIZED!
    line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"
    >>> # INPUT TWEET IS ALREADY NORMALIZED!
    >>> line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"

    input_ids = torch.tensor([tokenizer.encode(line)])
    >>> input_ids = torch.tensor([tokenizer.encode(line)])

    with torch.no_grad():
        features = bertweet(input_ids)  # Models outputs are now tuples
    >>> with torch.no_grad():
    ...     features = bertweet(input_ids)  # Models outputs are now tuples

    ## With TensorFlow 2.0+:
    # from transformers import TFAutoModel
    # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")
    >>> # With TensorFlow 2.0+:
    >>> # from transformers import TFAutoModel
    >>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")


The original code can be found `here <https://github.com/VinAIResearch/BERTweet>`__.
This model was contributed by `dqnguyen <https://huggingface.co/dqnguyen>`__. The original code can be found `here
<https://github.com/VinAIResearch/BERTweet>`__.

BertweetTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
docs/source/model_doc/bigbird.rst (new file, 136 lines)
@@ -0,0 +1,136 @@

..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BigBird
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BigBird model was proposed in `Big Bird: Transformers for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by
Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon,
Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention
based transformer which extends Transformer based models, such as BERT, to much longer sequences. In addition to sparse
attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it
has been shown that applying sparse, global, and random attention approximates full attention, while being
computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context,
BigBird has shown improved performance on various long document NLP tasks, such as question answering and
summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP.
Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence
length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that
reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and
is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our
theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire
sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to
8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context,
BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also
propose novel applications to genomics data.*

Tips:

- For an in-detail explanation of how BigBird's attention works, see `this blog post
  <https://huggingface.co/blog/big-bird>`__.
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using
  **original_full** is advised as there is no benefit in using **block_sparse** attention.
- The code currently uses a window size of 3 blocks and 2 global blocks.
- Sequence length must be divisible by block size.
- Current implementation supports only **ITC**.
- Current implementation doesn't support **num_random_blocks = 0**.
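To make the tips above concrete, here is a minimal sketch (assuming the ``google/bigbird-roberta-base`` checkpoint) of choosing between the two attention implementations; the keyword arguments simply override the corresponding :class:`~transformers.BigBirdConfig` attributes:

.. code-block::

    >>> from transformers import BigBirdModel

    >>> # default block sparse attention, with an explicit block size and number of random blocks
    >>> model = BigBirdModel.from_pretrained("google/bigbird-roberta-base", block_size=64, num_random_blocks=3)

    >>> # for sequences shorter than 1024 tokens, full attention is usually the better choice
    >>> model = BigBirdModel.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")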
This model was contributed by `vasudevgupta <https://huggingface.co/vasudevgupta>`__. The original code can be found
`here <https://github.com/google-research/bigbird>`__.
BigBirdConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdConfig
    :members:


BigBirdTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary

BigBirdTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdTokenizerFast
    :members:

BigBird specific outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput
    :members:


BigBirdModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdModel
    :members: forward


BigBirdForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForPreTraining
    :members: forward


BigBirdForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForCausalLM
    :members: forward


BigBirdForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForMaskedLM
    :members: forward


BigBirdForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForSequenceClassification
    :members: forward


BigBirdForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForMultipleChoice
    :members: forward


BigBirdForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForTokenClassification
    :members: forward


BigBirdForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForQuestionAnswering
    :members: forward
docs/source/model_doc/bigbird_pegasus.rst (new file, 98 lines)
@@ -0,0 +1,98 @@

..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BigBirdPegasus
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BigBird model was proposed in `Big Bird: Transformers for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by
Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon,
Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention
based transformer which extends Transformer based models, such as BERT, to much longer sequences. In addition to sparse
attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it
has been shown that applying sparse, global, and random attention approximates full attention, while being
computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context,
BigBird has shown improved performance on various long document NLP tasks, such as question answering and
summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP.
Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence
length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that
reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and
is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our
theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire
sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to
8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context,
BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also
propose novel applications to genomics data.*

Tips:

- For an in-detail explanation of how BigBird's attention works, see `this blog post
  <https://huggingface.co/blog/big-bird>`__.
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using
  **original_full** is advised as there is no benefit in using **block_sparse** attention.
- The code currently uses a window size of 3 blocks and 2 global blocks.
- Sequence length must be divisible by block size.
- Current implementation supports only **ITC**.
- Current implementation doesn't support **num_random_blocks = 0**.
- BigBirdPegasus uses the `PegasusTokenizer
  <https://github.com/huggingface/transformers/blob/master/src/transformers/models/pegasus/tokenization_pegasus.py>`__.
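As an illustration of the seq2seq use case described above, here is a minimal sketch (assuming the ``google/bigbird-pegasus-large-arxiv`` checkpoint) of summarizing a long document:

.. code-block::

    >>> from transformers import PegasusTokenizer, BigBirdPegasusForConditionalGeneration

    >>> tokenizer = PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv")

    >>> text = "Replace this with a (possibly very long) article to summarize."
    >>> inputs = tokenizer(text, return_tensors="pt")
    >>> summary_ids = model.generate(**inputs, num_beams=4, max_length=64)
    >>> summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)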
The original code can be found `here <https://github.com/google-research/bigbird>`__.
BigBirdPegasusConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusConfig
    :members:


BigBirdPegasusModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusModel
    :members: forward


BigBirdPegasusForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForConditionalGeneration
    :members: forward


BigBirdPegasusForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForSequenceClassification
    :members: forward


BigBirdPegasusForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForQuestionAnswering
    :members: forward


BigBirdPegasusForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForCausalLM
    :members: forward
@@ -36,7 +36,8 @@ and code publicly available. Human evaluations show our best models are superior
dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
failure cases of our models.*

The authors' code can be found `here <https://github.com/facebookresearch/ParlAI>`__ .
This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__. The authors' code can be found `here
<https://github.com/facebookresearch/ParlAI>`__ .


Implementation Notes

@@ -39,7 +39,8 @@ and code publicly available. Human evaluations show our best models are superior
dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
failure cases of our models.*

The authors' code can be found `here <https://github.com/facebookresearch/ParlAI>`__ .
This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__. The authors' code can be
found `here <https://github.com/facebookresearch/ParlAI>`__ .

BlenderbotSmallConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -43,4 +43,5 @@ Tips:
  that is sadly not open-sourced yet. It would be very useful for the community if someone tries to implement the
  algorithm to make BORT fine-tuning work.

The original code can be found `here <https://github.com/alexa/bort/>`__.
This model was contributed by `stefan-it <https://huggingface.co/stefan-it>`__. The original code can be found `here
<https://github.com/alexa/bort/>`__.

@@ -37,7 +37,8 @@ Tips:
- This implementation is the same as RoBERTa. Refer to the :doc:`documentation of RoBERTa <roberta>` for usage examples
  as well as the information relative to the inputs and outputs.

The original code can be found `here <https://camembert-model.fr/>`__.
This model was contributed by `camembert <https://huggingface.co/camembert>`__. The original code can be found `here
<https://camembert-model.fr/>`__.

CamembertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
docs/source/model_doc/clip.rst (new file, 154 lines)
@@ -0,0 +1,154 @@

..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

CLIP
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The CLIP model was proposed in `Learning Transferable Visual Models From Natural Language Supervision
<https://arxiv.org/abs/2103.00020>`__ by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh,
Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP
(Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be
instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing
for the task, similarly to the zero-shot capabilities of GPT-2 and 3.

The abstract from the paper is the following:

*State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This
restricted form of supervision limits their generality and usability since additional labeled data is needed to specify
any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a
much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes
with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400
million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference
learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study
the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks
such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need
for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot
without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained
model weights at this https URL.*

Usage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image
classification. CLIP uses a ViT-like transformer to get visual features and a causal language model to get the text
features. Both the text and visual features are then projected to a latent space with identical dimension. The dot
product between the projected image and text features is then used as a similarity score.

To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches,
which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors
also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.
The :class:`~transformers.CLIPFeatureExtractor` can be used to resize (or rescale) and normalize images for the model.

The :class:`~transformers.CLIPTokenizer` is used to encode the text. The :class:`~transformers.CLIPProcessor` wraps
:class:`~transformers.CLIPFeatureExtractor` and :class:`~transformers.CLIPTokenizer` into a single instance to both
encode the text and prepare the images. The following example shows how to get the image-text similarity scores using
:class:`~transformers.CLIPProcessor` and :class:`~transformers.CLIPModel`.


.. code-block::

    >>> import torch
    >>> from PIL import Image
    >>> import requests

    >>> from transformers import CLIPProcessor, CLIPModel

    >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

    >>> outputs = model(**inputs)
    >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
    >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities


This model was contributed by `valhalla <https://huggingface.co/valhalla>`__. The original code can be found `here
<https://github.com/openai/CLIP>`__.

CLIPConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPConfig
    :members: from_text_vision_configs


CLIPTextConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTextConfig
    :members:


CLIPVisionConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPVisionConfig
    :members:


CLIPTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary

CLIPTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTokenizerFast
    :members:


CLIPFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPFeatureExtractor
    :members:


CLIPProcessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPProcessor
    :members:


CLIPModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPModel
    :members: forward, get_text_features, get_image_features


CLIPTextModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTextModel
    :members: forward


CLIPVisionModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPVisionModel
    :members: forward
@@ -34,8 +34,10 @@ ConvBERT significantly outperforms BERT and its variants in various downstream t
fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while
using less than 1/4 training cost. Code and pre-trained models will be released.*

ConvBERT training tips are similar to those of BERT. The original implementation can be found here:
https://github.com/yitu-opensource/ConvBert
ConvBERT training tips are similar to those of BERT.

This model was contributed by `abhishek <https://huggingface.co/abhishek>`__. The original implementation can be found
here: https://github.com/yitu-opensource/ConvBert

ConvBertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -56,8 +58,7 @@ ConvBertTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertTokenizerFast
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary
    :members:


ConvBertModel
45
docs/source/model_doc/cpm.rst
Normal file
45
docs/source/model_doc/cpm.rst
Normal file
@ -0,0 +1,45 @@
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
CPM
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The CPM model was proposed in `CPM: A Large-scale Generative Chinese Pre-trained Language Model
|
||||
<https://arxiv.org/abs/2012.00413>`__ by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin,
|
||||
Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen,
|
||||
Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3,
|
||||
with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even
|
||||
zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus
|
||||
of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the
|
||||
Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best
|
||||
of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained
|
||||
language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation,
|
||||
cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many
|
||||
NLP tasks in the settings of few-shot (even zero-shot) learning.*
|
||||
|
||||
This model was contributed by `canwenxu <https://huggingface.co/canwenxu>`__. The original implementation can be found
|
||||
here: https://github.com/TsinghuaAI/CPM-Generate
|
||||
|
||||
Note: We only have a tokenizer here, since the model architecture is the same as GPT-2.
|
||||
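A hedged sketch of pairing the CPM tokenizer with the regular GPT-2 architecture is shown below. The checkpoint name
``TsinghuaAI/CPM-Generate`` is an assumption, and :class:`~transformers.CpmTokenizer` additionally requires the
``jieba`` and ``sentencepiece`` packages.

.. code-block::

    >>> from transformers import CpmTokenizer, GPT2LMHeadModel

    >>> # assumed checkpoint name; the CPM weights load into the standard GPT-2 architecture
    >>> tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    >>> model = GPT2LMHeadModel.from_pretrained("TsinghuaAI/CPM-Generate")

    >>> inputs = tokenizer("清华大学", return_tensors="pt")
    >>> outputs = model.generate(inputs.input_ids, do_sample=True, max_length=30)
    >>> print(tokenizer.decode(outputs[0]))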
|
||||
CpmTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CpmTokenizer
|
||||
:members:
|
||||
@ -46,7 +46,8 @@ Tips:
|
||||
`reusing the past in generative models <../quickstart.html#using-the-past>`__ for more information on the usage of
|
||||
this argument.
|
||||
|
||||
The original code can be found `here <https://github.com/salesforce/ctrl>`__.
|
||||
This model was contributed by `keskarnitishr <https://huggingface.co/keskarnitishr>`__. The original code can be found
|
||||
`here <https://github.com/salesforce/ctrl>`__.
|
||||
|
||||
|
||||
CTRLConfig
|
||||
|
||||
@ -38,7 +38,8 @@ the training data performs consistently better on a wide range of NLP tasks, ach
|
||||
pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.*
|
||||
|
||||
|
||||
The original code can be found `here <https://github.com/microsoft/DeBERTa>`__.
|
||||
This model was contributed by `DeBERTa <https://huggingface.co/DeBERTa>`__. The original code can be found `here
|
||||
<https://github.com/microsoft/DeBERTa>`__.
|
||||
|
||||
|
||||
DebertaConfig
|
||||
@ -55,12 +56,18 @@ DebertaTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
DebertaTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaTokenizerFast
|
||||
:members: build_inputs_with_special_tokens, create_token_type_ids_from_sequences
|
||||
|
||||
|
||||
DebertaModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaModel
|
||||
:members:
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaPreTrainedModel
|
||||
@ -74,25 +81,25 @@ DebertaForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaForMaskedLM
|
||||
:members:
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaForSequenceClassification
|
||||
:members:
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaForTokenClassification
|
||||
:members:
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaForQuestionAnswering
|
||||
:members:
|
||||
:members: forward
|
||||
|
||||
119
docs/source/model_doc/deberta_v2.rst
Normal file
@ -0,0 +1,119 @@
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
DeBERTa-v2
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention
|
||||
<https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is based on Google's
|
||||
BERT model released in 2018 and Facebook's RoBERTa model released in 2019.
|
||||
|
||||
It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in
|
||||
RoBERTa.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Recent progress in pre-trained neural language models has significantly improved the performance of many natural
|
||||
language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with
|
||||
disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the
|
||||
disentangled attention mechanism, where each word is represented using two vectors that encode its content and
|
||||
position, respectively, and the attention weights among words are computed using disentangled matrices on their
|
||||
contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to
|
||||
predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency
|
||||
of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of
|
||||
the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9%
|
||||
(90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and
|
||||
pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.*
|
||||
|
||||
|
||||
The following information is visible directly on the `original implementation repository
<https://github.com/microsoft/DeBERTa>`__. DeBERTa v2 is the second version of the DeBERTa model. It includes the
1.5B model used for the SuperGLUE single-model submission, which achieved a score of 89.9 versus the human baseline of
89.8. You can find more details about this submission in the authors' `blog
<https://www.microsoft.com/en-us/research/blog/microsoft-deberta-surpasses-human-performance-on-the-superglue-benchmark/>`__.
|
||||
|
||||
New in v2:
|
||||
|
||||
- **Vocabulary** In v2 the tokenizer is changed to use a new vocabulary of size 128K built from the training data.
Instead of a GPT2-based tokenizer, it is now a `sentencepiece-based <https://github.com/google/sentencepiece>`__
tokenizer.
|
||||
- **nGiE (nGram Induced Input Encoding)** The DeBERTa-v2 model uses an additional convolution layer alongside the
first transformer layer to better learn the local dependencies of input tokens.
|
||||
- **Sharing position projection matrix with content projection matrix in attention layer** Based on previous
|
||||
experiments, this can save parameters without affecting the performance.
|
||||
- **Apply bucket to encode relative positions** The DeBERTa-v2 model uses a log bucket to encode relative positions,
similar to T5.
|
||||
- **900M model & 1.5B model** Two additional model sizes are available: 900M and 1.5B, which significantly improve
performance on downstream tasks.
|
||||
|
||||
This model was contributed by `DeBERTa <https://huggingface.co/DeBERTa>`__. The original code can be found `here
|
||||
<https://github.com/microsoft/DeBERTa>`__.
|
||||
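Below is a minimal sketch of running the v2 model, assuming the ``microsoft/deberta-v2-xlarge`` checkpoint; note that
:class:`~transformers.DebertaV2Tokenizer` requires the ``sentencepiece`` package because of the new 128K vocabulary.

.. code-block::

    >>> from transformers import DebertaV2Tokenizer, DebertaV2Model

    >>> # assumed checkpoint name
    >>> tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    >>> model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

    >>> inputs = tokenizer("DeBERTa-v2 uses a sentencepiece vocabulary.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state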
|
||||
|
||||
DebertaV2Config
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2Config
|
||||
:members:
|
||||
|
||||
|
||||
DebertaV2Tokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2Tokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
DebertaV2Model
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2Model
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaV2PreTrainedModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2PreTrainedModel
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaV2ForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2ForMaskedLM
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaV2ForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2ForSequenceClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaV2ForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2ForTokenClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
DebertaV2ForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DebertaV2ForQuestionAnswering
|
||||
:members: forward
|
||||
111
docs/source/model_doc/deit.rst
Normal file
@ -0,0 +1,111 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
DeiT
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. note::
|
||||
|
||||
This is a recently introduced model so the API hasn't been tested extensively. There may be some bugs or slight
|
||||
breaking changes to fix in the future. If you see something strange, file a `Github Issue
|
||||
<https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__.
|
||||
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The DeiT model was proposed in `Training data-efficient image transformers & distillation through attention
|
||||
<https://arxiv.org/abs/2012.12877>`__ by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre
|
||||
Sablayrolles, Hervé Jégou. The `Vision Transformer (ViT) <https://huggingface.co/transformers/model_doc/vit.html>`__
|
||||
introduced in `Dosovitskiy et al., 2020 <https://arxiv.org/abs/2010.11929>`__ has shown that one can match or even
|
||||
outperform existing convolutional neural networks using a Transformer encoder (BERT-like). However, the ViT models
|
||||
introduced in that paper required training on expensive infrastructure for multiple weeks, using external data. DeiT
|
||||
(data-efficient image transformers) are more efficiently trained transformers for image classification, requiring far
|
||||
less data and far less computing resources compared to the original ViT models.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Recently, neural networks purely based on attention were shown to address image understanding tasks such as image
|
||||
classification. However, these visual transformers are pre-trained with hundreds of millions of images using an
|
||||
expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free
|
||||
transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision
|
||||
transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external
|
||||
data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation
|
||||
token ensuring that the student learns from the teacher through attention. We show the interest of this token-based
|
||||
distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets
|
||||
for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and
|
||||
models.*
|
||||
|
||||
Tips:
|
||||
|
||||
- Compared to ViT, DeiT models use a so-called distillation token to effectively learn from a teacher (which, in the
|
||||
DeiT paper, is a ResNet-like model). The distillation token is learned through backpropagation, by interacting with
|
||||
the class ([CLS]) and patch tokens through the self-attention layers.
|
||||
- There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top
|
||||
of the final hidden state of the class token and not using the distillation signal, or (2) by placing both a
|
||||
prediction head on top of the class token and on top of the distillation token. In that case, the [CLS] prediction
|
||||
head is trained using regular cross-entropy between the prediction of the head and the ground-truth label, while the
|
||||
distillation prediction head is trained using hard distillation (cross-entropy between the prediction of the
|
||||
distillation head and the label predicted by the teacher). At inference time, one takes the average prediction
|
||||
between both heads as the final prediction. (2) is also called "fine-tuning with distillation", because one relies on a
|
||||
teacher that has already been fine-tuned on the downstream dataset. In terms of models, (1) corresponds to
|
||||
:class:`~transformers.DeiTForImageClassification` and (2) corresponds to
|
||||
:class:`~transformers.DeiTForImageClassificationWithTeacher`.
|
||||
- Note that the authors also did try soft distillation for (2) (in which case the distillation prediction head is
|
||||
trained using KL divergence to match the softmax output of the teacher), but hard distillation gave the best results.
|
||||
- All released checkpoints were pre-trained and fine-tuned on ImageNet-1k only. No external data was used. This is in
|
||||
contrast with the original ViT model, which used external data like the JFT-300M dataset/Imagenet-21k for
|
||||
pre-training.
|
||||
- The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into
|
||||
:class:`~transformers.ViTModel` or :class:`~transformers.ViTForImageClassification`. Techniques like data
|
||||
augmentation, optimization, and regularization were used in order to simulate training on a much larger dataset
|
||||
(while only using ImageNet-1k for pre-training). There are 4 variants available (in 3 different sizes):
|
||||
`facebook/deit-tiny-patch16-224`, `facebook/deit-small-patch16-224`, `facebook/deit-base-patch16-224` and
|
||||
`facebook/deit-base-patch16-384`. Note that one should use :class:`~transformers.DeiTFeatureExtractor` in order to
|
||||
prepare images for the model.
|
||||
|
||||
This model was contributed by `nielsr <https://huggingface.co/nielsr>`__.
|
||||
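As a complement to the tips above, here is a hedged inference sketch using the distillation-aware head; the checkpoint
name ``facebook/deit-base-distilled-patch16-224`` and the example image URL are assumptions.

.. code-block::

    >>> import requests
    >>> from PIL import Image
    >>> from transformers import DeiTFeatureExtractor, DeiTForImageClassificationWithTeacher

    >>> # assumed checkpoint name (a distilled variant)
    >>> feature_extractor = DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    >>> model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = feature_extractor(images=image, return_tensors="pt")
    >>> logits = model(**inputs).logits  # average of the [CLS] head and the distillation head predictions
    >>> predicted_class = model.config.id2label[int(logits.argmax(-1))]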
|
||||
|
||||
DeiTConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DeiTConfig
|
||||
:members:
|
||||
|
||||
|
||||
DeiTFeatureExtractor
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DeiTFeatureExtractor
|
||||
:members: __call__
|
||||
|
||||
|
||||
DeiTModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DeiTModel
|
||||
:members: forward
|
||||
|
||||
|
||||
DeiTForImageClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DeiTForImageClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
DeiTForImageClassificationWithTeacher
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DeiTForImageClassificationWithTeacher
|
||||
:members: forward
|
||||
@ -44,8 +44,8 @@ Tips:
|
||||
- DistilBERT doesn't have options to select the input positions (:obj:`position_ids` input). This could be added if
|
||||
necessary though, just let us know if you need this option.
|
||||
|
||||
The original code can be found `here
|
||||
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__.
|
||||
This model was contributed by `victorsanh <https://huggingface.co/victorsanh>`__. The original code can be found
|
||||
:prefix_link:`here <examples/research-projects/distillation>`.
|
||||
|
||||
|
||||
DistilBertConfig
|
||||
|
||||
@ -30,7 +30,8 @@ our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% ab
|
||||
retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA
|
||||
benchmarks.*
|
||||
|
||||
The original code can be found `here <https://github.com/facebookresearch/DPR>`__.
|
||||
This model was contributed by `lhoestq <https://huggingface.co/lhoestq>`__. The original code can be found `here
|
||||
<https://github.com/facebookresearch/DPR>`__.
|
||||
|
||||
|
||||
DPRConfig
|
||||
|
||||
@ -54,7 +54,8 @@ Tips:
|
||||
:class:`~transformers.ElectraForPreTraining` model (the classification head will be randomly initialized as it
|
||||
doesn't exist in the generator).
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/electra>`__.
|
||||
This model was contributed by `lysandre <https://huggingface.co/lysandre>`__. The original code can be found `here
|
||||
<https://github.com/google-research/electra>`__.
|
||||
|
||||
|
||||
ElectraConfig
|
||||
@ -184,3 +185,52 @@ TFElectraForQuestionAnswering
|
||||
|
||||
.. autoclass:: transformers.TFElectraForQuestionAnswering
|
||||
:members: call
|
||||
|
||||
|
||||
FlaxElectraModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraModel
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxElectraForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraForPreTraining
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxElectraForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraForMaskedLM
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxElectraForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraForSequenceClassification
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxElectraForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraForMultipleChoice
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxElectraForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraForTokenClassification
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxElectraForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxElectraForQuestionAnswering
|
||||
:members: __call__
|
||||
|
||||
@ -35,7 +35,8 @@ time they outperform other pretraining approaches. Different versions of FlauBER
|
||||
protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared to the research
|
||||
community for further reproducible experiments in French NLP.*
|
||||
|
||||
The original code can be found `here <https://github.com/getalp/Flaubert>`__.
|
||||
This model was contributed by `formiel <https://huggingface.co/formiel>`__. The original code can be found `here
|
||||
<https://github.com/getalp/Flaubert>`__.
|
||||
|
||||
|
||||
FlaubertConfig
|
||||
|
||||
@ -34,7 +34,8 @@ data, then decode using noisy channel model reranking. Our submissions are ranke
|
||||
human evaluation campaign. On En->De, our system significantly outperforms other systems as well as human translations.
|
||||
This system improves upon our WMT'18 submission by 4.5 BLEU points.*
|
||||
|
||||
The original code can be found here <https://github.com/pytorch/fairseq/tree/master/examples/wmt19>__.
|
||||
This model was contributed by `stas <https://huggingface.co/stas>`__. The original code can be found `here
<https://github.com/pytorch/fairseq/tree/master/examples/wmt19>`__.
|
||||
|
||||
Implementation Notes
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@ -56,7 +57,7 @@ FSMTTokenizer
|
||||
|
||||
.. autoclass:: transformers.FSMTTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, prepare_seq2seq_batch, save_vocabulary
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
FSMTModel
|
||||
|
||||
@ -49,7 +49,8 @@ Tips:
|
||||
:class:`~transformers.FunnelBaseModel`, :class:`~transformers.FunnelForSequenceClassification` and
|
||||
:class:`~transformers.FunnelForMultipleChoice`.
|
||||
|
||||
The original code can be found `here <https://github.com/laiguokun/Funnel-Transformer>`__.
|
||||
This model was contributed by `sgugger <https://huggingface.co/sgugger>`__. The original code can be found `here
|
||||
<https://github.com/laiguokun/Funnel-Transformer>`__.
|
||||
|
||||
|
||||
FunnelConfig
|
||||
|
||||
@ -45,12 +45,13 @@ Tips:
|
||||
`Write With Transformer <https://transformer.huggingface.co/doc/gpt>`__ is a webapp created and hosted by Hugging Face
|
||||
showcasing the generative capabilities of several models. GPT is one of them.
|
||||
|
||||
The original code can be found `here <https://github.com/openai/finetune-transformer-lm>`__.
|
||||
This model was contributed by `thomwolf <https://huggingface.co/thomwolf>`__. The original code can be found `here
|
||||
<https://github.com/openai/finetune-transformer-lm>`__.
|
||||
|
||||
Note:
|
||||
|
||||
If you want to reproduce the original tokenization process of the `OpenAI GPT` paper, you will need to install ``ftfy``
|
||||
and ``SpaCy``::
|
||||
and ``SpaCy``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
||||
@ -45,7 +45,8 @@ Tips:
|
||||
Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five
|
||||
different sizes: small, medium, large, xl and a distilled version of the small checkpoint: `distilgpt-2`.
|
||||
|
||||
The original code can be found `here <https://openai.com/blog/better-language-models/>`__.
|
||||
This model was contributed by `thomwolf <https://huggingface.co/thomwolf>`__. The original code can be found `here
|
||||
<https://openai.com/blog/better-language-models/>`__.
|
||||
|
||||
|
||||
GPT2Config
|
||||
|
||||
67
docs/source/model_doc/gpt_neo.rst
Normal file
@ -0,0 +1,67 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
GPT Neo
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The GPTNeo model was released in the `EleutherAI/gpt-neo <https://github.com/EleutherAI/gpt-neo>`__ repository by Sid
|
||||
Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. It is a GPT2-like causal language model trained on the
|
||||
`Pile <https://pile.eleuther.ai/>`__ dataset.
|
||||
|
||||
The architecture is similar to GPT2 except that GPT Neo uses local attention in every other layer with a window size of
|
||||
256 tokens.
|
||||
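The alternating attention pattern is configurable. Below is a sketch, assuming the ``GPTNeoConfig`` argument names
(``attention_types`` describes a repeated pattern of per-layer attention types and ``window_size`` the local attention
window); a randomly initialized model is built for illustration only.

.. code-block::

    >>> from transformers import GPTNeoConfig, GPTNeoModel

    >>> # 12 layers alternating global and local attention, with a local window of 256 tokens
    >>> config = GPTNeoConfig(
    ...     num_layers=12,
    ...     attention_types=[[["global", "local"], 6]],
    ...     window_size=256,
    ...     hidden_size=256,
    ...     num_heads=8,
    ... )
    >>> model = GPTNeoModel(config)  # randomly initialized, for illustration only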
|
||||
This model was contributed by `valhalla <https://huggingface.co/valhalla>`__.
|
||||
|
||||
Generation
|
||||
_______________________________________________________________________________________________________________________
|
||||
|
||||
The :obj:`generate()` method can be used to generate text with the GPT Neo model.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import GPTNeoForCausalLM, GPT2Tokenizer
|
||||
>>> model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
|
||||
>>> tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
|
||||
|
||||
>>> prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
|
||||
... "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
|
||||
... "researchers was the fact that the unicorns spoke perfect English."
|
||||
|
||||
>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids
|
||||
|
||||
>>> gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100,)
|
||||
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]
|
||||
|
||||
|
||||
GPTNeoConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPTNeoConfig
|
||||
:members:
|
||||
|
||||
|
||||
GPTNeoModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPTNeoModel
|
||||
:members: forward
|
||||
|
||||
|
||||
GPTNeoForCausalLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPTNeoForCausalLM
|
||||
:members: forward
|
||||
@ -40,23 +40,25 @@ Examples of use:
|
||||
|
||||
.. code-block::
|
||||
|
||||
from transformers import HerbertTokenizer, RobertaModel
|
||||
>>> from transformers import HerbertTokenizer, RobertaModel
|
||||
|
||||
tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
|
||||
model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1")
|
||||
>>> tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
|
||||
>>> model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1")
|
||||
|
||||
encoded_input = tokenizer.encode("Kto ma lepszą sztukę, ma lepszy rząd – to jasne.", return_tensors='pt')
|
||||
outputs = model(encoded_input)
|
||||
>>> encoded_input = tokenizer.encode("Kto ma lepszą sztukę, ma lepszy rząd – to jasne.", return_tensors='pt')
|
||||
>>> outputs = model(encoded_input)
|
||||
|
||||
# HerBERT can also be loaded using AutoTokenizer and AutoModel:
|
||||
import torch
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
>>> # HerBERT can also be loaded using AutoTokenizer and AutoModel:
|
||||
>>> import torch
|
||||
>>> from transformers import AutoModel, AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
|
||||
model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
|
||||
>>> model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1")
|
||||
|
||||
|
||||
The original code can be found `here <https://github.com/allegro/HerBERT>`__.
|
||||
This model was contributed by `rmroczkowski <https://huggingface.co/rmroczkowski>`__. The original code can be found
|
||||
`here <https://github.com/allegro/HerBERT>`__.
|
||||
|
||||
|
||||
HerbertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
89
docs/source/model_doc/ibert.rst
Normal file
@ -0,0 +1,89 @@
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
I-BERT
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The I-BERT model was proposed in `I-BERT: Integer-only BERT Quantization <https://arxiv.org/abs/2101.01321>`__ by
|
||||
Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It's a quantized version of RoBERTa running
|
||||
inference up to four times faster.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language
|
||||
Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for
|
||||
efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this,
|
||||
previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot
|
||||
efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM
|
||||
processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes
|
||||
the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for
|
||||
nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT
|
||||
inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using
|
||||
RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to
|
||||
the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for
|
||||
INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has
|
||||
been open-sourced.*
|
||||
|
||||
This model was contributed by `kssteven <https://huggingface.co/kssteven>`__. The original code can be found `here
|
||||
<https://github.com/kssteven418/I-BERT>`__.
|
||||
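A minimal sketch, assuming the ``kssteven418/ibert-roberta-base`` checkpoint (I-BERT reuses the RoBERTa tokenizer, so
:class:`~transformers.AutoTokenizer` resolves to it):

.. code-block::

    >>> from transformers import AutoTokenizer, IBertModel

    >>> # assumed checkpoint name
    >>> tokenizer = AutoTokenizer.from_pretrained("kssteven418/ibert-roberta-base")
    >>> model = IBertModel.from_pretrained("kssteven418/ibert-roberta-base")

    >>> inputs = tokenizer("I-BERT quantizes the entire inference to integer-only arithmetic.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> last_hidden_state = outputs.last_hidden_state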
|
||||
|
||||
IBertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertConfig
|
||||
:members:
|
||||
|
||||
|
||||
IBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertModel
|
||||
:members: forward
|
||||
|
||||
|
||||
IBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertForMaskedLM
|
||||
:members: forward
|
||||
|
||||
|
||||
IBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertForSequenceClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
IBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertForMultipleChoice
|
||||
:members: forward
|
||||
|
||||
|
||||
IBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertForTokenClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
IBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.IBertForQuestionAnswering
|
||||
:members: forward
|
||||
@ -56,31 +56,32 @@ Tips:
|
||||
|
||||
.. code-block::
|
||||
|
||||
def normalize_bbox(bbox, width, height):
|
||||
return [
|
||||
int(1000 * (bbox[0] / width)),
|
||||
int(1000 * (bbox[1] / height)),
|
||||
int(1000 * (bbox[2] / width)),
|
||||
int(1000 * (bbox[3] / height)),
|
||||
]
|
||||
def normalize_bbox(bbox, width, height):
|
||||
return [
|
||||
int(1000 * (bbox[0] / width)),
|
||||
int(1000 * (bbox[1] / height)),
|
||||
int(1000 * (bbox[2] / width)),
|
||||
int(1000 * (bbox[3] / height)),
|
||||
]
|
||||
|
||||
Here, :obj:`width` and :obj:`height` correspond to the width and height of the original document in which the token
|
||||
occurs. Those can be obtained using the Python Image Library (PIL) library for example, as follows:
|
||||
|
||||
.. code-block::
|
||||
|
||||
from PIL import Image
|
||||
from PIL import Image
|
||||
|
||||
image = Image.open("name_of_your_document - can be a png file, pdf, etc.")
|
||||
image = Image.open("name_of_your_document - can be a png file, pdf, etc.")
|
||||
|
||||
width, height = image.size
|
||||
width, height = image.size
|
||||
|
||||
- For a demo which shows how to fine-tune :class:`LayoutLMForTokenClassification` on the `FUNSD dataset
|
||||
<https://guillaumejaume.github.io/FUNSD/>`__ (a collection of annotated forms), see `this notebook
|
||||
<https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb>`__.
|
||||
It includes an inference part, which shows how to use Google's Tesseract on a new document.
|
||||
|
||||
The original code can be found `here <https://github.com/microsoft/unilm/tree/master/layoutlm>`_.
|
||||
This model was contributed by `liminghao1630 <https://huggingface.co/liminghao1630>`__. The original code can be found
|
||||
`here <https://github.com/microsoft/unilm/tree/master/layoutlm>`_.
|
||||
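Putting the bounding box normalization described in the tips above together with the model, here is a hedged sketch of
a forward pass with token-level boxes; the checkpoint name ``microsoft/layoutlm-base-uncased``, the toy words and the
toy box coordinates are all assumptions.

.. code-block::

    >>> import torch
    >>> from transformers import LayoutLMTokenizer, LayoutLMForTokenClassification

    >>> # assumed checkpoint name; the token classification head is freshly initialized
    >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
    >>> model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=7)

    >>> words = ["Hello", "world"]
    >>> normalized_boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # already scaled to the 0-1000 range

    >>> # repeat each word-level box for every sub-token produced by the tokenizer
    >>> tokens, token_boxes = [], []
    >>> for word, box in zip(words, normalized_boxes):
    ...     word_tokens = tokenizer.tokenize(word)
    ...     tokens.extend(word_tokens)
    ...     token_boxes.extend([box] * len(word_tokens))

    >>> # add the special tokens and their conventional boxes
    >>> input_ids = tokenizer.convert_tokens_to_ids([tokenizer.cls_token] + tokens + [tokenizer.sep_token])
    >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]

    >>> outputs = model(input_ids=torch.tensor([input_ids]), bbox=torch.tensor([token_boxes]))
    >>> logits = outputs.logits  # shape (1, sequence_length, num_labels)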
|
||||
|
||||
LayoutLMConfig
|
||||
@ -130,3 +131,31 @@ LayoutLMForTokenClassification
|
||||
|
||||
.. autoclass:: transformers.LayoutLMForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFLayoutLMModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFLayoutLMModel
|
||||
:members:
|
||||
|
||||
|
||||
TFLayoutLMForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFLayoutLMForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFLayoutLMForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFLayoutLMForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFLayoutLMForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFLayoutLMForTokenClassification
|
||||
:members:
|
||||
|
||||
@ -53,6 +53,8 @@ Tips:
|
||||
- A notebook showing how to fine-tune LED, can be accessed `here
|
||||
<https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing>`__.
|
||||
|
||||
This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__.
|
||||
|
||||
|
||||
LEDConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@ -73,8 +75,7 @@ LEDTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LEDTokenizerFast
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
:members:
|
||||
|
||||
|
||||
LED specific outputs
|
||||
|
||||
@ -40,7 +40,8 @@ Tips:
|
||||
token belongs to which segment. Just separate your segments with the separation token :obj:`tokenizer.sep_token` (or
|
||||
:obj:`</s>`).
|
||||
|
||||
The Authors' code can be found `here <https://github.com/allenai/longformer>`__.
|
||||
This model was contributed by `beltagy <https://huggingface.co/beltagy>`__. The Authors' code can be found `here
|
||||
<https://github.com/allenai/longformer>`__.
|
||||
|
||||
Longformer Self Attention
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
159
docs/source/model_doc/luke.rst
Normal file
@ -0,0 +1,159 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
LUKE
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The LUKE model was proposed in `LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention
|
||||
<https://arxiv.org/abs/2010.01057>`_ by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto.
|
||||
It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps
|
||||
improve performance on various downstream tasks involving reasoning about entities such as named entity recognition,
|
||||
extractive and cloze-style question answering, entity typing, and relation classification.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Entity representations are useful in natural language tasks involving entities. In this paper, we propose new
|
||||
pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed
|
||||
model treats words and entities in a given text as independent tokens, and outputs contextualized representations of
|
||||
them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves
|
||||
predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also
|
||||
propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the
|
||||
transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model
|
||||
achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains
|
||||
state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification),
|
||||
CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question
|
||||
answering).*
|
||||
|
||||
Tips:
|
||||
|
||||
- This implementation is the same as :class:`~transformers.RobertaModel` with the addition of entity embeddings as well
|
||||
as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.
|
||||
- LUKE treats entities as input tokens; therefore, it takes :obj:`entity_ids`, :obj:`entity_attention_mask`,
|
||||
:obj:`entity_token_type_ids` and :obj:`entity_position_ids` as extra input. You can obtain those using
|
||||
:class:`~transformers.LukeTokenizer`.
|
||||
- :class:`~transformers.LukeTokenizer` takes :obj:`entities` and :obj:`entity_spans` (character-based start and end
|
||||
positions of the entities in the input text) as extra input. :obj:`entities` typically consist of [MASK] entities or
|
||||
Wikipedia entities. A brief description of how these entity inputs are used is as follows:
|
||||
|
||||
- *Inputting [MASK] entities to compute entity representations*: The [MASK] entity is used to mask entities to be
|
||||
predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by
|
||||
gathering the information about the entity from the input text. Therefore, the [MASK] entity can be used to address
|
||||
downstream tasks requiring the information of entities in text such as entity typing, relation classification, and
|
||||
named entity recognition.
|
||||
- *Inputting Wikipedia entities to compute knowledge-enhanced token representations*: LUKE learns rich information
|
||||
(or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By
|
||||
using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in
|
||||
the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as
|
||||
question answering.
|
||||
|
||||
- There are three head models for the former use case:
|
||||
|
||||
- :class:`~transformers.LukeForEntityClassification`, for tasks to classify a single entity in an input text such as
|
||||
entity typing, e.g. the `Open Entity dataset <https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html>`__.
|
||||
This model places a linear head on top of the output entity representation.
|
||||
- :class:`~transformers.LukeForEntityPairClassification`, for tasks to classify the relationship between two entities
|
||||
such as relation classification, e.g. the `TACRED dataset <https://nlp.stanford.edu/projects/tacred/>`__. This
|
||||
model places a linear head on top of the concatenated output representation of the pair of given entities.
|
||||
- :class:`~transformers.LukeForEntitySpanClassification`, for tasks to classify the sequence of entity spans, such as
|
||||
named entity recognition (NER). This model places a linear head on top of the output entity representations. You
|
||||
can address NER using this model by inputting all possible entity spans in the text to the model.
|
||||
|
||||
:class:`~transformers.LukeTokenizer` has a ``task`` argument, which enables you to easily create an input to these
|
||||
head models by specifying ``task="entity_classification"``, ``task="entity_pair_classification"``, or
|
||||
``task="entity_span_classification"``. Please refer to the example code of each head models.
|
||||
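A hedged sketch of the ``task`` argument is shown below, using the base checkpoint (the fine-tuned checkpoints already
store the task in their tokenizer configuration, so passing it explicitly is mainly useful when starting from
``studio-ousia/luke-base``):

.. code-block::

    >>> from transformers import LukeTokenizer

    >>> # the ``task`` argument makes the tokenizer build the inputs expected by the corresponding head model
    >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_span_classification")

    >>> text = "Beyoncé lives in Los Angeles."
    >>> entity_spans = [(0, 7), (17, 28)]  # candidate spans, given as character offsets
    >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
    >>> print(inputs.keys())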
|
||||
There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with
|
||||
the HuggingFace implementation of LUKE. They can be found `here
|
||||
<https://github.com/studio-ousia/luke/tree/master/notebooks>`__.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification
|
||||
|
||||
>>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
|
||||
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
|
||||
|
||||
# Example 1: Computing the contextualized entity representation corresponding to the entity mention "Beyoncé"
|
||||
>>> text = "Beyoncé lives in Los Angeles."
|
||||
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
|
||||
>>> inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
|
||||
>>> outputs = model(**inputs)
|
||||
>>> word_last_hidden_state = outputs.last_hidden_state
|
||||
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
|
||||
|
||||
# Example 2: Inputting Wikipedia entities to obtain enriched contextualized representations
|
||||
>>> entities = ["Beyoncé", "Los Angeles"] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
|
||||
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
|
||||
>>> inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
|
||||
>>> outputs = model(**inputs)
|
||||
>>> word_last_hidden_state = outputs.last_hidden_state
|
||||
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
|
||||
|
||||
# Example 3: Classifying the relationship between two entities using LukeForEntityPairClassification head model
|
||||
>>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
|
||||
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
|
||||
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
|
||||
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
|
||||
>>> outputs = model(**inputs)
|
||||
>>> logits = outputs.logits
|
||||
>>> predicted_class_idx = int(logits[0].argmax())
|
||||
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
|
||||
|
||||
This model was contributed by `ikuyamada <https://huggingface.co/ikuyamada>`__ and `nielsr
|
||||
<https://huggingface.co/nielsr>`__. The original code can be found `here <https://github.com/studio-ousia/luke>`__.
|
||||
|
||||
|
||||
LukeConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LukeConfig
|
||||
:members:
|
||||
|
||||
|
||||
LukeTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LukeTokenizer
|
||||
:members: __call__, save_vocabulary
|
||||
|
||||
|
||||
LukeModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LukeModel
|
||||
:members: forward
|
||||
|
||||
|
||||
LukeForEntityClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LukeForEntityClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
LukeForEntityPairClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LukeForEntityPairClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
LukeForEntitySpanClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LukeForEntitySpanClassification
|
||||
:members: forward
|
||||
@ -52,7 +52,8 @@ Tips:
|
||||
contains self-attention for each respective modality and cross-attention, only the cross attention is returned and
|
||||
both self attention outputs are disregarded.
|
||||
|
||||
The original code can be found `here <https://github.com/airsplay/lxmert>`__.
|
||||
This model was contributed by `eltoto1219 <https://huggingface.co/eltoto1219>`__. The original code can be found `here
|
||||
<https://github.com/airsplay/lxmert>`__.
|
||||
|
||||
|
||||
LxmertConfig
|
||||
|
||||
130
docs/source/model_doc/m2m_100.rst
Normal file
@ -0,0 +1,130 @@
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
M2M100
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The M2M100 model was proposed in `Beyond English-Centric Multilingual Machine Translation
|
||||
<https://arxiv.org/abs/2010.11125>`__ by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky,
|
||||
Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy
|
||||
Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Existing work in translation demonstrated the potential of massively multilingual machine translation by training a
|
||||
single model able to translate between any pair of languages. However, much of this work is English-Centric by training
|
||||
only on data which was translated from or to English. While this is supported by large sources of training data, it
|
||||
does not reflect translation needs worldwide. In this work, we create a true Many-to-Many multilingual translation
|
||||
model that can translate directly between any pair of 100 languages. We build and open source a training dataset that
|
||||
covers thousands of language directions with supervised data, created through large-scale mining. Then, we explore how
|
||||
to effectively increase model capacity through a combination of dense scaling and language-specific sparse parameters
|
||||
to create high quality models. Our focus on non-English-Centric models brings gains of more than 10 BLEU when directly
|
||||
translating between non-English directions while performing competitively to the best single systems of WMT. We
|
||||
open-source our scripts so that others may reproduce the data, evaluation, and final M2M-100 model.*
|
||||
|
||||
This model was contributed by `valhalla <https://huggingface.co/valhalla>`__.
|
||||
|
||||
|
||||
Training and Generation
|
||||
_______________________________________________________________________________________________________________________
|
||||
|
||||
M2M100 is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation tasks. As the model is
|
||||
multilingual it expects the sequences in a certain format: A special language id token is used as prefix in both the
|
||||
source and target text. The text format is :obj:`[lang_code] X [eos]`, where :obj:`lang_code` is the source language
id for the source text and the target language id for the target text, and :obj:`X` is the source or target text.
|
||||
|
||||
The :class:`~transformers.M2M100Tokenizer` depends on :obj:`sentencepiece` so be sure to install it before running the
|
||||
examples. To install :obj:`sentencepiece` run ``pip install sentencepiece``.
|
||||
|
||||
- Supervised Training
|
||||
|
||||
.. code-block::
|
||||
|
||||
from transformers import M2M100Config, M2M100ForConditionalGeneration, M2M100Tokenizer
|
||||
|
||||
model = M2M100ForConditionalGeneration.from_pretrained('facebook/m2m100_418M')
|
||||
tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang="en", tgt_lang="fr")
|
||||
|
||||
src_text = "Life is like a box of chocolates."
|
||||
tgt_lang = "La vie est comme une boîte de chocolat."
|
||||
|
||||
model_inputs = tokenizer(src_text, return_tensors="pt")
|
||||
with tokenizer.as_target_tokenizer():
|
||||
labels = tokenizer(tgt_text, return_tensors="pt").input_ids
|
||||
|
||||
loss = model(**model_inputs, labels=labels).loss  # forward pass
|
||||
|
||||
|
||||
- Generation
|
||||
|
||||
M2M100 uses the :obj:`eos_token_id` as the :obj:`decoder_start_token_id` for generation with the target language id
|
||||
being forced as the first generated token. To force the target language id as the first generated token, pass the
|
||||
`forced_bos_token_id` parameter to the `generate` method. The following example shows how to translate from
Hindi to French and from Chinese to English using the `facebook/m2m100_418M` checkpoint.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
|
||||
|
||||
>>> hi_text = "जीवन एक चॉकलेट बॉक्स की तरह है।"
|
||||
>>> chinese_text = "生活就像一盒巧克力。"
|
||||
|
||||
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
|
||||
>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
|
||||
|
||||
>>> # translate Hindi to French
|
||||
>>> tokenizer.src_lang = "hi"
|
||||
>>> encoded_hi = tokenizer(hi_text, return_tensors="pt")
|
||||
>>> generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.get_lang_id("fr"))
|
||||
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
||||
"La vie est comme une boîte de chocolat."
|
||||
|
||||
>>> # translate Chinese to English
|
||||
>>> tokenizer.src_lang = "zh"
|
||||
>>> encoded_zh = tokenizer(chinese_text, return_tensors="pt")
|
||||
>>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en"))
|
||||
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
||||
"Life is like a box of chocolate."
|
||||
|
||||
|
||||
M2M100Config
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.M2M100Config
|
||||
:members:
|
||||
|
||||
|
||||
M2M100Tokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.M2M100Tokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
M2M100Model
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.M2M100Model
|
||||
:members: forward
|
||||
|
||||
|
||||
M2M100ForConditionalGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.M2M100ForConditionalGeneration
|
||||
:members: forward
|
||||
|
||||
|
||||
@ -37,6 +37,7 @@ Implementation Notes
|
||||
- the model starts generating with :obj:`pad_token_id` (which has 0 as a token_embedding) as the prefix (Bart uses
|
||||
:obj:`<s/>`),
|
||||
- Code to bulk convert models can be found in ``convert_marian_to_pytorch.py``.
|
||||
- This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__.
|
||||
|
||||
Naming
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@ -76,27 +77,29 @@ require 3 character language codes:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import MarianMTModel, MarianTokenizer
|
||||
src_text = [
|
||||
'>>fra<< this is a sentence in english that we want to translate to french',
|
||||
'>>por<< This should go to portuguese',
|
||||
'>>esp<< And this to Spanish'
|
||||
]
|
||||
>>> from transformers import MarianMTModel, MarianTokenizer
|
||||
>>> src_text = [
|
||||
... '>>fra<< this is a sentence in english that we want to translate to french',
|
||||
... '>>por<< This should go to portuguese',
|
||||
... '>>esp<< And this to Spanish'
|
||||
... ]
|
||||
|
||||
model_name = 'Helsinki-NLP/opus-mt-en-roa'
|
||||
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
||||
print(tokenizer.supported_language_codes)
|
||||
model = MarianMTModel.from_pretrained(model_name)
|
||||
translated = model.generate(**tokenizer.prepare_seq2seq_batch(src_text, return_tensors="pt"))
|
||||
tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
|
||||
# ["c'est une phrase en anglais que nous voulons traduire en français",
|
||||
# 'Isto deve ir para o português.',
|
||||
# 'Y esto al español']
|
||||
>>> model_name = 'Helsinki-NLP/opus-mt-en-roa'
|
||||
>>> tokenizer = MarianTokenizer.from_pretrained(model_name)
|
||||
>>> print(tokenizer.supported_language_codes)
|
||||
['>>zlm_Latn<<', '>>mfe<<', '>>hat<<', '>>pap<<', '>>ast<<', '>>cat<<', '>>ind<<', '>>glg<<', '>>wln<<', '>>spa<<', '>>fra<<', '>>ron<<', '>>por<<', '>>ita<<', '>>oci<<', '>>arg<<', '>>min<<']
|
||||
|
||||
>>> model = MarianMTModel.from_pretrained(model_name)
|
||||
>>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
|
||||
>>> [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
|
||||
["c'est une phrase en anglais que nous voulons traduire en français",
|
||||
'Isto deve ir para o português.',
|
||||
'Y esto al español']
|
||||
|
||||
|
||||
|
||||
|
||||
Code to see available pretrained models:
|
||||
Here is the code to see all available pretrained models on the hub:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
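    # A minimal sketch, assuming the ``huggingface_hub`` package is installed and that
    # the Marian checkpoints live under the Helsinki-NLP organization on the hub.
    from huggingface_hub import HfApi

    model_ids = [m.modelId for m in HfApi().list_models(author="Helsinki-NLP")]
    print(len(model_ids))
    print(model_ids[:5])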
@ -147,21 +150,22 @@ Example of translating english to many romance languages, using old-style 2 char
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import MarianMTModel, MarianTokenizer
|
||||
src_text = [
|
||||
'>>fr<< this is a sentence in english that we want to translate to french',
|
||||
'>>pt<< This should go to portuguese',
|
||||
'>>es<< And this to Spanish'
|
||||
]
|
||||
>>> from transformers import MarianMTModel, MarianTokenizer
|
||||
>>> src_text = [
|
||||
... '>>fr<< this is a sentence in english that we want to translate to french',
|
||||
... '>>pt<< This should go to portuguese',
|
||||
... '>>es<< And this to Spanish'
|
||||
... ]
|
||||
|
||||
model_name = 'Helsinki-NLP/opus-mt-en-ROMANCE'
|
||||
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
||||
print(tokenizer.supported_language_codes)
|
||||
>>> model_name = 'Helsinki-NLP/opus-mt-en-ROMANCE'
|
||||
>>> tokenizer = MarianTokenizer.from_pretrained(model_name)
|
||||
|
||||
model = MarianMTModel.from_pretrained(model_name)
|
||||
translated = model.generate(**tokenizer.prepare_seq2seq_batch(src_text, return_tensors="pt"))
|
||||
tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
|
||||
# ["c'est une phrase en anglais que nous voulons traduire en français", 'Isto deve ir para o português.', 'Y esto al español']
|
||||
>>> model = MarianMTModel.from_pretrained(model_name)
|
||||
>>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
|
||||
>>> tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
|
||||
["c'est une phrase en anglais que nous voulons traduire en français",
|
||||
'Isto deve ir para o português.',
|
||||
'Y esto al español']
|
||||
|
||||
|
||||
|
||||
@ -176,7 +180,7 @@ MarianTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MarianTokenizer
|
||||
:members: prepare_seq2seq_batch
|
||||
:members: as_target_tokenizer
|
||||
|
||||
|
||||
MarianModel
|
||||
|
||||
@ -10,14 +10,14 @@
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
MBart
|
||||
MBart and MBart-50
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
**DISCLAIMER:** If you see something strange, file a `Github Issue
|
||||
<https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__ and assign
|
||||
@patrickvonplaten
|
||||
|
||||
Overview
|
||||
Overview of MBart
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The MBart model was presented in `Multilingual Denoising Pre-training for Neural Machine Translation
|
||||
@ -29,35 +29,37 @@ corpora in many languages using the BART objective. mBART is one of the first me
|
||||
sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only
|
||||
on the encoder, decoder, or reconstructing parts of the text.
|
||||
|
||||
The Authors' code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/mbart>`__
|
||||
This model was contributed by `valhalla <https://huggingface.co/valhalla>`__. The Authors' code can be found `here
|
||||
<https://github.com/pytorch/fairseq/tree/master/examples/mbart>`__
|
||||
|
||||
Examples
|
||||
Training of MBart
|
||||
_______________________________________________________________________________________________________________________
|
||||
|
||||
- Examples and scripts for fine-tuning mBART and other models for sequence to sequence tasks can be found in
|
||||
:prefix_link:`examples/seq2seq/ <examples/seq2seq/README.md>`.
|
||||
- Given the large embeddings table, mBART consumes a large amount of GPU RAM, especially for fine-tuning.
|
||||
:class:`MarianMTModel` is usually a better choice for bilingual machine translation.
|
||||
MBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for translation tasks. As the
|
||||
model is multilingual it expects the sequences in a different format. A special language id token is added in both the
|
||||
source and target text. The source text format is :obj:`X [eos, src_lang_code]` where :obj:`X` is the source text. The
|
||||
target text format is :obj:`[tgt_lang_code] X [eos]`. :obj:`bos` is never used.
|
||||
|
||||
Training
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
MBart is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation task. As the model is
|
||||
multilingual it expects the sequences in a different format. A special language id token is added in both the source
|
||||
and target text. The source text format is :obj:`X [eos, src_lang_code]` where :obj:`X` is the source text. The target
|
||||
text format is :obj:`[tgt_lang_code] X [eos]`. :obj:`bos` is never used.
|
||||
|
||||
The :meth:`~transformers.MBartTokenizer.prepare_seq2seq_batch` handles this automatically and should be used to encode
|
||||
the sequences for sequence-to-sequence fine-tuning.
|
||||
The regular :meth:`~transformers.MBartTokenizer.__call__` will encode source text format, and it should be wrapped
|
||||
inside the context manager :meth:`~transformers.MBartTokenizer.as_target_tokenizer` to encode target text format.
|
||||
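As a quick check of the source format described above, the last two tokens of an encoded sentence can be inspected.
This is a minimal, illustrative sketch (the `facebook/mbart-large-en-ro` checkpoint is used only as an example):

.. code-block::

    >>> from transformers import MBartTokenizer

    >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    >>> ids = tokenizer("UN Chief Says There Is No Military Solution in Syria").input_ids
    >>> tokenizer.convert_ids_to_tokens(ids)[-2:]  # source text ends with [eos, src_lang_code]
    ['</s>', 'en_XX']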
|
||||
- Supervised training
|
||||
|
||||
.. code-block::
|
||||
|
||||
example_english_phrase = "UN Chief Says There Is No Military Solution in Syria"
|
||||
expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
|
||||
batch = tokenizer.prepare_seq2seq_batch(example_english_phrase, src_lang="en_XX", tgt_lang="ro_RO", tgt_texts=expected_translation_romanian, return_tensors="pt")
|
||||
model(input_ids=batch['input_ids'], labels=batch['labels']) # forward pass
|
||||
>>> from transformers import MBartForConditionalGeneration, MBartTokenizer

>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
>>> example_english_phrase = "UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"

>>> inputs = tokenizer(example_english_phrase, return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
...     labels = tokenizer(expected_translation_romanian, return_tensors="pt").input_ids

>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> # forward pass
>>> model(**inputs, labels=labels)
|
||||
|
||||
- Generation
|
||||
|
||||
@ -66,14 +68,95 @@ the sequences for sequence-to-sequence fine-tuning.
|
||||
|
||||
.. code-block::
|
||||
|
||||
from transformers import MBartForConditionalGeneration, MBartTokenizer
|
||||
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
|
||||
tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro")
|
||||
article = "UN Chief Says There Is No Military Solution in Syria"
|
||||
batch = tokenizer.prepare_seq2seq_batch(src_texts=[article], src_lang="en_XX", return_tensors="pt")
|
||||
translated_tokens = model.generate(**batch, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
|
||||
translation = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
|
||||
assert translation == "Şeful ONU declară că nu există o soluţie militară în Siria"
|
||||
>>> from transformers import MBartForConditionalGeneration, MBartTokenizer

>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
>>> article = "UN Chief Says There Is No Military Solution in Syria"
>>> inputs = tokenizer(article, return_tensors="pt")
>>> translated_tokens = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
>>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
"Şeful ONU declară că nu există o soluţie militară în Siria"
|
||||
|
||||
|
||||
Overview of MBart-50
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
MBart-50 was introduced in the `Multilingual Translation with Extensible Multilingual Pretraining and Finetuning
<https://arxiv.org/abs/2008.00401>`__ paper by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav
Chaudhary, Jiatao Gu, Angela Fan. MBart-50 is created using the original `mbart-large-cc25` checkpoint by extending
its embedding layers with randomly initialized vectors for an extra set of 25 language tokens and then pretraining it
on 50 languages.

According to the abstract:
|
||||
|
||||
*Multilingual translation models can be created through multilingual finetuning. Instead of finetuning on one
|
||||
direction, a pretrained model is finetuned on many directions at the same time. It demonstrates that pretrained models
|
||||
can be extended to incorporate additional languages without loss of performance. Multilingual finetuning improves on
|
||||
average 1 BLEU over the strongest baselines (being either multilingual from scratch or bilingual finetuning) while
|
||||
improving 9.3 BLEU on average over bilingual baselines from scratch.*
|
||||
|
||||
|
||||
Training of MBart-50
|
||||
_______________________________________________________________________________________________________________________
|
||||
|
||||
The text format for MBart-50 is slightly different from mBART. For MBart-50 the language id token is used as a prefix
|
||||
for both source and target text, i.e. the text format is :obj:`[lang_code] X [eos]`, where :obj:`lang_code` is source
|
||||
language id for source text and target language id for target text, with :obj:`X` being the source or target text
|
||||
respectively.
|
||||
|
||||
|
||||
MBart-50 has its own tokenizer :class:`~transformers.MBart50Tokenizer`.
|
||||
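The prefix format can be verified in the same way. A minimal, illustrative sketch (the `facebook/mbart-large-50`
checkpoint is used only as an example):

.. code-block::

    >>> from transformers import MBart50TokenizerFast

    >>> tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")
    >>> tokens = tokenizer.convert_ids_to_tokens(tokenizer("UN Chief Says There Is No Military Solution in Syria").input_ids)
    >>> tokens[0], tokens[-1]  # [lang_code] X [eos]
    ('en_XX', '</s>')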
|
||||
- Supervised training
|
||||
|
||||
.. code-block::
|
||||
|
||||
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
|
||||
|
||||
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50")
|
||||
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
|
||||
|
||||
src_text = " UN Chief Says There Is No Military Solution in Syria"
|
||||
tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
|
||||
|
||||
model_inputs = tokenizer(src_text, return_tensors="pt")
|
||||
with tokenizer.as_target_tokenizer():
|
||||
labels = tokenizer(tgt_text, return_tensors="pt").input_ids
|
||||
|
||||
model(**model_inputs, labels=labels) # forward pass
|
||||
|
||||
|
||||
- Generation
|
||||
|
||||
To generate using the mBART-50 multilingual translation models, :obj:`eos_token_id` is used as the
:obj:`decoder_start_token_id` and the target language id is forced as the first generated token. To force the
target language id as the first generated token, pass the :obj:`forced_bos_token_id` parameter to the :obj:`generate`
method. The following example shows how to translate from Hindi to French and from Arabic to English using the
`facebook/mbart-large-50-many-to-many-mmt` checkpoint.
|
||||
|
||||
.. code-block::
|
||||
|
||||
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
|
||||
|
||||
article_hi = "संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है"
|
||||
article_ar = "الأمين العام للأمم المتحدة يقول إنه لا يوجد حل عسكري في سوريا."
|
||||
|
||||
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
|
||||
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
|
||||
|
||||
# translate Hindi to French
|
||||
tokenizer.src_lang = "hi_IN"
|
||||
encoded_hi = tokenizer(article_hi, return_tensors="pt")
|
||||
generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"])
|
||||
tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
||||
# => "Le chef de l 'ONU affirme qu 'il n 'y a pas de solution militaire en Syria."
|
||||
|
||||
# translate Arabic to English
|
||||
tokenizer.src_lang = "ar_AR"
|
||||
encoded_ar = tokenizer(article_ar, return_tensors="pt")
|
||||
generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
|
||||
tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
||||
# => "The Secretary-General of the United Nations says there is no military solution in Syria."
|
||||
|
||||
|
||||
MBartConfig
|
||||
@ -87,7 +170,7 @@ MBartTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MBartTokenizer
|
||||
:members: build_inputs_with_special_tokens, prepare_seq2seq_batch
|
||||
:members: as_target_tokenizer, build_inputs_with_special_tokens
|
||||
|
||||
|
||||
MBartTokenizerFast
|
||||
@ -97,6 +180,20 @@ MBartTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
MBart50Tokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MBart50Tokenizer
|
||||
:members:
|
||||
|
||||
|
||||
MBart50TokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MBart50TokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
MBartModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
||||
docs/source/model_doc/megatron_bert.rst (new file, 154 lines)
@ -0,0 +1,154 @@
|
||||
..
|
||||
Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
MegatronBERT
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The MegatronBERT model was proposed in `Megatron-LM: Training Multi-Billion Parameter Language Models Using Model
|
||||
Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley,
|
||||
Jared Casper and Bryan Catanzaro.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Recent work in language modeling demonstrates that training large transformer models advances the state of the art in
|
||||
Natural Language Processing applications. However, very large models can be quite difficult to train due to memory
|
||||
constraints. In this work, we present our techniques for training very large transformer models and implement a simple,
|
||||
efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our
|
||||
approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model
|
||||
parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We
|
||||
illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain
|
||||
15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline
|
||||
that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance
|
||||
the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9
|
||||
billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in
|
||||
BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we
|
||||
achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA
|
||||
accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy
|
||||
of 89.4%).*
|
||||
|
||||
Tips:
|
||||
|
||||
We have provided pretrained `BERT-345M <https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m>`__ checkpoints
to use for evaluation or for fine-tuning on downstream tasks.

To access these checkpoints, first `sign up <https://ngc.nvidia.com/signup>`__ for and set up the NVIDIA GPU Cloud (NGC)
|
||||
Registry CLI. Further documentation for downloading models can be found in the `NGC documentation
|
||||
<https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1>`__.
|
||||
|
||||
Alternatively, you can directly download the checkpoints using:
|
||||
|
||||
BERT-345M-uncased::
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip
|
||||
-O megatron_bert_345m_v0_1_uncased.zip
|
||||
|
||||
BERT-345M-cased::
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O
|
||||
megatron_bert_345m_v0_1_cased.zip
|
||||
|
||||
Once you have obtained the checkpoints from NVIDIA GPU Cloud (NGC), you have to convert them to a format that will
|
||||
easily be loaded by Hugging Face Transformers and our port of the BERT code.
|
||||
|
||||
The following commands allow you to do the conversion. We assume that the folder ``models/megatron_bert`` contains
|
||||
``megatron_bert_345m_v0_1_{cased, uncased}.zip`` and that the commands are run from inside that folder::
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_uncased.zip
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip
|
||||
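Assuming the conversion writes a standard ``config.json`` and ``pytorch_model.bin`` pair into the target folder (an
assumption; the exact output layout is not shown here), the converted checkpoint can then be loaded like any other
pretrained model. A minimal sketch with a hypothetical local path:

.. code-block:: python

    from transformers import MegatronBertForMaskedLM

    # "models/megatron_bert" is a hypothetical local path holding the converted files.
    model = MegatronBertForMaskedLM.from_pretrained("models/megatron_bert")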
|
||||
This model was contributed by `jdemouth <https://huggingface.co/jdemouth>`__. The original code can be found `here
|
||||
<https://github.com/NVIDIA/Megatron-LM>`__. That repository contains a multi-GPU and multi-node implementation of the
|
||||
Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and
|
||||
"pipeline parallel" techniques.
|
||||
|
||||
MegatronBertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertConfig
|
||||
:members:
|
||||
|
||||
|
||||
MegatronBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertModel
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForMaskedLM
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForCausalLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForCausalLM
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForNextSentencePrediction
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForNextSentencePrediction
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForPreTraining
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForSequenceClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForMultipleChoice
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForTokenClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
MegatronBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MegatronBertForQuestionAnswering
|
||||
:members: forward
|
||||
|
||||
|
||||
docs/source/model_doc/megatron_gpt2.rst (new file, 71 lines)
@ -0,0 +1,71 @@
|
||||
..
|
||||
Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
MegatronGPT2
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The MegatronGPT2 model was proposed in `Megatron-LM: Training Multi-Billion Parameter Language Models Using Model
|
||||
Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley,
|
||||
Jared Casper and Bryan Catanzaro.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Recent work in language modeling demonstrates that training large transformer models advances the state of the art in
|
||||
Natural Language Processing applications. However, very large models can be quite difficult to train due to memory
|
||||
constraints. In this work, we present our techniques for training very large transformer models and implement a simple,
|
||||
efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our
|
||||
approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model
|
||||
parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We
|
||||
illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain
|
||||
15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline
|
||||
that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance
|
||||
the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9
|
||||
billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in
|
||||
BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we
|
||||
achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA
|
||||
accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy
|
||||
of 89.4%).*
|
||||
|
||||
Tips:
|
||||
|
||||
We have provided pretrained `GPT2-345M <https://ngc.nvidia.com/catalog/models/nvidia:megatron_lm_345m>`__ checkpoints
to use for evaluation or for fine-tuning on downstream tasks.

To access these checkpoints, first `sign up <https://ngc.nvidia.com/signup>`__ for and set up the NVIDIA GPU Cloud (NGC)
|
||||
Registry CLI. Further documentation for downloading models can be found in the `NGC documentation
|
||||
<https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1>`__.
|
||||
|
||||
Alternatively, you can directly download the checkpoints using::
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O
|
||||
megatron_gpt2_345m_v0_0.zip
|
||||
|
||||
Once you have obtained the checkpoint from NVIDIA GPU Cloud (NGC), you have to convert it to a format that will easily
|
||||
be loaded by Hugging Face Transformers GPT2 implementation.
|
||||
|
||||
The following command allows you to do the conversion. We assume that the folder ``models/megatron_gpt2`` contains
|
||||
``megatron_gpt2_345m_v0_0.zip`` and that the command is run from that folder::
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python3 $PATH_TO_TRANSFORMERS/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_gpt2_345m_v0_0.zip
|
||||
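Once converted, the checkpoint can be loaded with the regular GPT2 classes. A minimal sketch, assuming the conversion
wrote a standard ``config.json``/``pytorch_model.bin`` pair and using a hypothetical local path:

.. code-block:: python

    from transformers import GPT2LMHeadModel, GPT2Tokenizer

    # "models/megatron_gpt2" is a hypothetical local path holding the converted files.
    model = GPT2LMHeadModel.from_pretrained("models/megatron_gpt2")
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")  # the standard GPT-2 vocabulary is assumed here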
|
||||
This model was contributed by `jdemouth <https://huggingface.co/jdemouth>`__. The original code can be found `here
|
||||
<https://github.com/NVIDIA/Megatron-LM>`__. That repository contains a multi-GPU and multi-node implementation of the
|
||||
Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and
|
||||
"pipeline parallel" techniques.
|
||||
|
||||
@ -44,7 +44,8 @@ Tips:
|
||||
efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained
|
||||
with a causal language modeling (CLM) objective are better in that regard.
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/mobilebert>`__.
|
||||
This model was contributed by `vshampor <https://huggingface.co/vshampor>`__. The original code can be found `here
|
||||
<https://github.com/google-research/mobilebert>`__.
|
||||
|
||||
MobileBertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@ -28,7 +28,8 @@ multilingual variant of T5 that was pre-trained on a new Common Crawl-based data
|
||||
the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual
|
||||
benchmarks. All of the code and model checkpoints*
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/multilingual-t5>`__.
|
||||
This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__. The original code can be
|
||||
found `here <https://github.com/google-research/multilingual-t5>`__.
|
||||
|
||||
MT5Config
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@ -31,7 +31,8 @@ According to the abstract,
|
||||
extractive summary.
|
||||
- Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval.
|
||||
|
||||
The Authors' code can be found `here <https://github.com/google-research/pegasus>`__.
|
||||
This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__. The Authors' code can be found `here
|
||||
<https://github.com/google-research/pegasus>`__.
|
||||
|
||||
|
||||
Checkpoints
|
||||
@ -51,8 +52,9 @@ All the `checkpoints <https://huggingface.co/models?search=pegasus>`__ are fine-
|
||||
Examples
|
||||
_______________________________________________________________________________________________________________________
|
||||
|
||||
- :prefix_link:`Script <examples/seq2seq/finetune_pegasus_xsum.sh>` to fine-tune pegasus on the XSUM dataset. Data
|
||||
download instructions at :prefix_link:`examples/seq2seq/ <examples/seq2seq/README.md>`.
|
||||
- :prefix_link:`Script <examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh>` to fine-tune pegasus
|
||||
on the XSUM dataset. Data download instructions at :prefix_link:`examples/pytorch/summarization/
|
||||
<examples/pytorch/summarization/README.md>`.
|
||||
- FP16 is not supported (help/ideas on this appreciated!).
|
||||
- The adafactor optimizer is recommended for pegasus fine-tuning; a short setup sketch follows this list.
|
||||
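A minimal sketch of setting up Adafactor for Pegasus fine-tuning (the hyper-parameters below are illustrative, not
values prescribed by the authors):

.. code-block:: python

    from transformers import PegasusForConditionalGeneration
    from transformers.optimization import Adafactor

    model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    # Constant learning rate Adafactor; tune lr and the schedule for your own dataset.
    optimizer = Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)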
|
||||
@ -78,20 +80,20 @@ Usage Example
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
|
||||
import torch
|
||||
src_text = [
|
||||
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""
|
||||
]
|
||||
>>> from transformers import PegasusForConditionalGeneration, PegasusTokenizer
|
||||
>>> import torch
|
||||
>>> src_text = [
|
||||
... """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""
|
||||
... ]
|
||||
|
||||
model_name = 'google/pegasus-xsum'
|
||||
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
||||
tokenizer = PegasusTokenizer.from_pretrained(model_name)
|
||||
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
|
||||
batch = tokenizer.prepare_seq2seq_batch(src_text, truncation=True, padding='longest', return_tensors="pt").to(torch_device)
|
||||
translated = model.generate(**batch)
|
||||
tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
|
||||
assert tgt_text[0] == "California's largest electricity provider has turned off power to hundreds of thousands of customers."
|
||||
>>> model_name = 'google/pegasus-xsum'
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
>>> tokenizer = PegasusTokenizer.from_pretrained(model_name)
>>> model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device)
>>> batch = tokenizer(src_text, truncation=True, padding='longest', return_tensors="pt").to(device)
>>> translated = model.generate(**batch)
>>> tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
>>> assert tgt_text[0] == "California's largest electricity provider has turned off power to hundreds of thousands of customers."
|
||||
|
||||
|
||||
|
||||
@ -107,7 +109,7 @@ PegasusTokenizer
|
||||
warning: ``add_tokens`` does not work at the moment.
|
||||
|
||||
.. autoclass:: transformers.PegasusTokenizer
|
||||
:members: __call__, prepare_seq2seq_batch
|
||||
:members:
|
||||
|
||||
|
||||
PegasusTokenizerFast
|
||||
|
||||
@ -31,26 +31,26 @@ Example of use:
|
||||
|
||||
.. code-block::
|
||||
|
||||
import torch
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
>>> import torch
|
||||
>>> from transformers import AutoModel, AutoTokenizer
|
||||
|
||||
phobert = AutoModel.from_pretrained("vinai/phobert-base")
|
||||
tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
|
||||
>>> phobert = AutoModel.from_pretrained("vinai/phobert-base")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
|
||||
|
||||
# INPUT TEXT MUST BE ALREADY WORD-SEGMENTED!
|
||||
line = "Tôi là sinh_viên trường đại_học Công_nghệ ."
|
||||
>>> # INPUT TEXT MUST BE ALREADY WORD-SEGMENTED!
|
||||
>>> line = "Tôi là sinh_viên trường đại_học Công_nghệ ."
|
||||
|
||||
input_ids = torch.tensor([tokenizer.encode(line)])
|
||||
>>> input_ids = torch.tensor([tokenizer.encode(line)])
|
||||
|
||||
with torch.no_grad():
|
||||
features = phobert(input_ids) # Models outputs are now tuples
|
||||
>>> with torch.no_grad():
|
||||
... features = phobert(input_ids) # Models outputs are now tuples
|
||||
|
||||
## With TensorFlow 2.0+:
|
||||
# from transformers import TFAutoModel
|
||||
# phobert = TFAutoModel.from_pretrained("vinai/phobert-base")
|
||||
>>> # With TensorFlow 2.0+:
|
||||
>>> # from transformers import TFAutoModel
|
||||
>>> # phobert = TFAutoModel.from_pretrained("vinai/phobert-base")
|
||||
|
||||
|
||||
The original code can be found `here <https://github.com/VinAIResearch/PhoBERT>`__.
|
||||
This model was contributed by `dqnguyen <https://huggingface.co/dqnguyen>`__. The original code can be found `here <https://github.com/VinAIResearch/PhoBERT>`__.
|
||||
|
||||
PhobertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@ -43,6 +43,7 @@ outperforming parametric seq2seq models and task-specific retrieve-and-extract a
|
||||
tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art
|
||||
parametric-only seq2seq baseline.*
|
||||
|
||||
This model was contributed by `ola13 <https://huggingface.co/ola13>`__.
|
||||
|
||||
|
||||
RagConfig
|
||||
@ -56,7 +57,7 @@ RagTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RagTokenizer
|
||||
:members: prepare_seq2seq_batch
|
||||
:members:
|
||||
|
||||
|
||||
Rag specific outputs
|
||||
@ -94,3 +95,24 @@ RagTokenForGeneration
|
||||
|
||||
.. autoclass:: transformers.RagTokenForGeneration
|
||||
:members: forward, generate
|
||||
|
||||
|
||||
TFRagModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRagModel
|
||||
:members: call
|
||||
|
||||
|
||||
TFRagSequenceForGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRagSequenceForGeneration
|
||||
:members: call, generate
|
||||
|
||||
|
||||
TFRagTokenForGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRagTokenForGeneration
|
||||
:members: call, generate
|
||||
|
||||
@ -32,7 +32,8 @@ layers instead of the standard residuals, which allows storing activations only
|
||||
N times, where N is the number of layers. The resulting model, the Reformer, performs on par with Transformer models
|
||||
while being much more memory-efficient and much faster on long sequences.*
|
||||
|
||||
The Authors' code can be found `here <https://github.com/google/trax/tree/master/trax/models/reformer>`__.
|
||||
This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__. The Authors' code can be
|
||||
found `here <https://github.com/google/trax/tree/master/trax/models/reformer>`__.
|
||||
|
||||
Axial Positional Encodings
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@ -145,8 +146,8 @@ For training, the :class:`~transformers.ReformerModelWithLMHead` should be used
|
||||
|
||||
.. code-block::
|
||||
|
||||
input_ids = tokenizer.encode('This is a sentence from the training data', return_tensors='pt')
loss = model(input_ids, labels=input_ids)[0]
|
||||
|
||||
|
||||
ReformerConfig
|
||||
|
||||
@ -20,8 +20,8 @@ The RetriBERT model was proposed in the blog post `Explain Anything Like I'm Fiv
|
||||
Question Answering <https://yjernite.github.io/lfqa.html>`__. RetriBERT is a small model that uses either a single or
|
||||
pair of BERT encoders with lower-dimension projection for dense semantic indexing of text.
|
||||
|
||||
Code to train and use the model can be found `here
|
||||
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__.
|
||||
This model was contributed by `yjernite <https://huggingface.co/yjernite>`__. Code to train and use the model can be
|
||||
found :prefix_link:`here <examples/research-projects/distillation>`.
|
||||
|
||||
|
||||
RetriBertConfig
|
||||
|
||||
@ -44,7 +44,8 @@ Tips:
|
||||
separate your segments with the separation token :obj:`tokenizer.sep_token` (or :obj:`</s>`)
|
||||
- :doc:`CamemBERT <camembert>` is a wrapper around RoBERTa. Refer to this page for usage examples.
|
||||
|
||||
The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`_.
|
||||
This model was contributed by `julien-c <https://huggingface.co/julien-c>`__. The original code can be found `here
|
||||
<https://github.com/pytorch/fairseq/tree/master/examples/roberta>`_.
|
||||
|
||||
|
||||
RobertaConfig
|
||||
@ -165,3 +166,38 @@ FlaxRobertaModel
|
||||
|
||||
.. autoclass:: transformers.FlaxRobertaModel
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxRobertaForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxRobertaForMaskedLM
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxRobertaForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxRobertaForSequenceClassification
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxRobertaForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxRobertaForMultipleChoice
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxRobertaForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxRobertaForTokenClassification
|
||||
:members: __call__
|
||||
|
||||
|
||||
FlaxRobertaForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaxRobertaForQuestionAnswering
|
||||
:members: __call__
|
||||
|
||||
docs/source/model_doc/speech_to_text.rst (new file, 153 lines)
@ -0,0 +1,153 @@
|
||||
..
|
||||
Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
Speech2Text
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The Speech2Text model was proposed in `fairseq S2T: Fast Speech-to-Text Modeling with fairseq
|
||||
<https://arxiv.org/abs/2010.05171>`__ by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. It's a
|
||||
transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech
|
||||
Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are
|
||||
fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the
|
||||
transcripts/translations autoregressively. Speech2Text has been fine-tuned on several datasets for ASR and ST:
|
||||
`LibriSpeech <http://www.openslr.org/12>`__, `CoVoST 2 <https://github.com/facebookresearch/covost>`__, `MuST-C
|
||||
<https://ict.fbk.eu/must-c/>`__.
|
||||
|
||||
This model was contributed by `valhalla <https://huggingface.co/valhalla>`__. The original code can be found `here
|
||||
<https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text>`__.
|
||||
|
||||
|
||||
Inference
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Speech2Text is a speech model that accepts a float tensor of log-mel filter-bank features extracted from the speech
|
||||
signal. It's a transformer-based seq2seq model, so the transcripts/translations are generated autoregressively. The
|
||||
:obj:`generate()` method can be used for inference.
|
||||
|
||||
The :class:`~transformers.Speech2TextFeatureExtractor` class is responsible for extracting the log-mel filter-bank
|
||||
features. The :class:`~transformers.Speech2TextProcessor` wraps :class:`~transformers.Speech2TextFeatureExtractor` and
|
||||
:class:`~transformers.Speech2TextTokenizer` into a single instance to both extract the input features and decode the
|
||||
predicted token ids.
|
||||
|
||||
The feature extractor depends on :obj:`torchaudio` and the tokenizer depends on :obj:`sentencepiece` so be sure to
|
||||
install those packages before running the examples. You can either install them as extra speech dependencies with
``pip install transformers"[speech, sentencepiece]"`` or install the packages separately with ``pip install torchaudio
sentencepiece``. Note that ``torchaudio`` requires the development version of the `libsndfile
|
||||
<http://www.mega-nerd.com/libsndfile/>`__ package which can be installed via a system package manager. On Ubuntu it can
|
||||
be installed as follows: ``apt install libsndfile1-dev``
|
||||
|
||||
|
||||
- ASR and Speech Translation
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> import torch
|
||||
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
|
||||
>>> from datasets import load_dataset
|
||||
>>> import soundfile as sf
|
||||
|
||||
>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
|
||||
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
|
||||
|
||||
>>> def map_to_array(batch):
|
||||
... speech, _ = sf.read(batch["file"])
|
||||
... batch["speech"] = speech
|
||||
... return batch
|
||||
|
||||
>>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
|
||||
>>> ds = ds.map(map_to_array)
|
||||
|
||||
>>> inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt")
|
||||
>>> generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"])
|
||||
|
||||
>>> transcription = processor.batch_decode(generated_ids)
|
||||
|
||||
|
||||
- Multilingual speech translation
|
||||
|
||||
For multilingual speech translation models, :obj:`eos_token_id` is used as the :obj:`decoder_start_token_id` and
|
||||
the target language id is forced as the first generated token. To force the target language id as the first
|
||||
generated token, pass the :obj:`forced_bos_token_id` parameter to the :obj:`generate()` method. The following
|
||||
example shows how to translate English speech to French text using the `facebook/s2t-medium-mustc-multilingual-st`
|
||||
checkpoint.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> import torch
|
||||
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
|
||||
>>> from datasets import load_dataset
|
||||
>>> import soundfile as sf
|
||||
|
||||
>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
|
||||
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
|
||||
|
||||
>>> def map_to_array(batch):
|
||||
... speech, _ = sf.read(batch["file"])
|
||||
... batch["speech"] = speech
|
||||
... return batch
|
||||
|
||||
>>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
|
||||
>>> ds = ds.map(map_to_array)
|
||||
|
||||
>>> inputs = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt")
|
||||
>>> generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"], forced_bos_token_id=processor.tokenizer.lang_code_to_id["fr"])
|
||||
|
||||
>>> translation = processor.batch_decode(generated_ids)
|
||||
|
||||
|
||||
See the `model hub <https://huggingface.co/models?filter=speech_to_text>`__ to look for Speech2Text checkpoints.
|
||||
|
||||
|
||||
Speech2TextConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.Speech2TextConfig
|
||||
:members:
|
||||
|
||||
|
||||
Speech2TextTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.Speech2TextTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
Speech2TextFeatureExtractor
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.Speech2TextFeatureExtractor
|
||||
:members: __call__
|
||||
|
||||
|
||||
Speech2TextProcessor
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.Speech2TextProcessor
|
||||
:members: __call__, from_pretrained, save_pretrained, batch_decode, decode, as_target_processor
|
||||
|
||||
|
||||
Speech2TextModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.Speech2TextModel
|
||||
:members: forward
|
||||
|
||||
|
||||
Speech2TextForConditionalGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.Speech2TextForConditionalGeneration
|
||||
:members: forward
|
||||
Some files were not shown because too many files have changed in this diff.