Mirror of https://github.com/huggingface/transformers.git (synced 2025-11-04 20:14:36 +08:00)
Compare commits
2622 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| b62ca59527 | |||
| a316a6aaa8 | |||
| 4bcc35cd69 | |||
| 482c9178d3 | |||
| 2513fe0d02 | |||
| 30245c0c60 | |||
| c34010551a | |||
| 01aa0b8527 | |||
| 96907367f1 | |||
| 3cdf8b7ec2 | |||
| 9db1f41604 | |||
| c950fef545 | |||
| 4544f906e2 | |||
| 92671532e7 | |||
| 9209d36f93 | |||
| 7cb52f53ef | |||
| 321c05abab | |||
| 28a690a80e | |||
| 45e26125de | |||
| 12dfbd4f7a | |||
| 98109464c1 | |||
| 1af58c0706 | |||
| efae6645e2 | |||
| 393b8dc09a | |||
| 5543b30aa6 | |||
| bf0d12c220 | |||
| 601d4d699c | |||
| fd405e9a93 | |||
| 798dbff6a7 | |||
| 834b6884c5 | |||
| 08c9607c3d | |||
| 79a82cc06a | |||
| 88d7f96e33 | |||
| fc5bce9e60 | |||
| 135791e8ef | |||
| 7cc15bdd96 | |||
| 2ffef0d0c7 | |||
| 364a5ae1f0 | |||
| 315f464b0a | |||
| 24f46ea3f3 | |||
| 27cf1d97f0 | |||
| e008d520bb | |||
| 6a495cae00 | |||
| 0e1fce3c01 | |||
| 5543efd5cc | |||
| 40457bcebb | |||
| d12ceb48ba | |||
| 7ac9110711 | |||
| 0148c262e7 | |||
| 70c1e1d2d5 | |||
| 4965aee064 | |||
| fc24a93e64 | |||
| 0a3d0e02c5 | |||
| 6894b486d0 | |||
| 1121ce9f98 | |||
| cf10d4cfdd | |||
| 609e0c583f | |||
| c9163a8d5a | |||
| f216b60671 | |||
| 49f6e7a3c6 | |||
| c2a26ec8a6 | |||
| 64c393ee74 | |||
| b29683736a | |||
| 9fe09cec76 | |||
| 7c41057d50 | |||
| 5e85b324ec | |||
| 5e31a98ab7 | |||
| 033124e5f8 | |||
| 7ca6627ec3 | |||
| 54e9ce785d | |||
| 9022ef021a | |||
| 173528e368 | |||
| 76e5af4cfd | |||
| c01480bba3 | |||
| 58918c76f4 | |||
| b28b537131 | |||
| 11fdde0271 | |||
| 1ae132a07d | |||
| 5144104070 | |||
| 0d158e38c9 | |||
| f5c2a122e3 | |||
| 06b60c8b05 | |||
| 35e0687256 | |||
| 22d2c8ea2f | |||
| 2589505693 | |||
| d8c26ed139 | |||
| a34fb91d54 | |||
| ffabcf5249 | |||
| 3363a19b12 | |||
| 0cca61925c | |||
| 1c5cd8e5f5 | |||
| c439752482 | |||
| 417e492f1e | |||
| 75e1eed8d1 | |||
| 4e741efa92 | |||
| 1262495a91 | |||
| 88429c57bc | |||
| 76ee9c8bc9 | |||
| bf493d5569 | |||
| e9ef21175e | |||
| ebc36108dc | |||
| d2a7c86dc3 | |||
| 90f4b24520 | |||
| f4e1f02210 | |||
| 866a8ccabb | |||
| b99ad457f4 | |||
| eb0ca71ef6 | |||
| fa0be6d761 | |||
| 18a0150bfa | |||
| 3fe75c7f70 | |||
| 59345cc87f | |||
| bc3a0c0607 | |||
| 68e19f1c22 | |||
| c0c577cf8f | |||
| aa6a29bc25 | |||
| 2fd28d4363 | |||
| f1679d7c48 | |||
| 5ed94b2312 | |||
| d97b4176e5 | |||
| 9a3f91088c | |||
| f45e873910 | |||
| e33929ef1e | |||
| 84be482f66 | |||
| 2db1e2f415 | |||
| 5f721ad6e4 | |||
| a258982af3 | |||
| 32e94cff64 | |||
| 973433260e | |||
| 8a377c3d6e | |||
| 3d3e605aff | |||
| ca2d0f98c4 | |||
| 279d8e24f7 | |||
| 355954ffca | |||
| 18177a1a60 | |||
| efeb75b805 | |||
| bb154ac50c | |||
| fb6cccb863 | |||
| 38bba9cdd5 | |||
| f1a3d03741 | |||
| 90c833870c | |||
| 049e14f0e3 | |||
| 20fa828984 | |||
| 5e06963394 | |||
| 204ebc25e6 | |||
| 043f9f51f9 | |||
| cd40f6564e | |||
| 70bc3ead4f | |||
| 7291ea0bff | |||
| e4aaa45805 | |||
| 011cc0be51 | |||
| af497b5672 | |||
| 49c5202522 | |||
| c3e607496c | |||
| 439aa1d6e9 | |||
| 3d495c61ef | |||
| d5477baf7d | |||
| c852036b4a | |||
| 0c55a384f8 | |||
| 0946d1209d | |||
| edcb3ac59a | |||
| 9e03364999 | |||
| f9f8a5312e | |||
| 36434220fc | |||
| ebba39e4e1 | |||
| bbad4c6989 | |||
| 1bf4098e03 | |||
| 7b5a1e7d51 | |||
| a9f1fc6c94 | |||
| 7b685f5229 | |||
| 1affde2f10 | |||
| f7c93b3cee | |||
| 66bcfbb130 | |||
| d812e6d76e | |||
| ebab096e86 | |||
| 9ad36ad57f | |||
| 9931f817b7 | |||
| 9208f57b16 | |||
| 403d309857 | |||
| ca5e1cdf8e | |||
| e93ccb3290 | |||
| 538531cde5 | |||
| fe24139702 | |||
| 9aa219a1fe | |||
| 86578bb04c | |||
| 5620033115 | |||
| 473808da0d | |||
| caf3746678 | |||
| 6293eb04df | |||
| 08b59d10e5 | |||
| 20451195f0 | |||
| 699541c4b3 | |||
| e80d6c689b | |||
| d541938c48 | |||
| 1e2631d6f8 | |||
| f6da8b2200 | |||
| 1c986f42ff | |||
| 3ae2e86baf | |||
| 466aa57a45 | |||
| ef2dcdccaa | |||
| 5d63ca6c38 | |||
| 4e10acb3e5 | |||
| 3b3619a327 | |||
| ac99217e92 | |||
| 0a375f5abd | |||
| e8db8b845a | |||
| 29c36e9f36 | |||
| 13aa174112 | |||
| 6e603cb789 | |||
| f90bc44d9a | |||
| 2cfb947f59 | |||
| f0340b3031 | |||
| 02e5f79662 | |||
| 9f5d5a531d | |||
| 41a1d27cde | |||
| a139d1a160 | |||
| 4c7f564f9a | |||
| 37be3786cf | |||
| 42860e92a4 | |||
| 36dfc317b3 | |||
| 439f1cab20 | |||
| c0554776de | |||
| e817747941 | |||
| b6f365a8ed | |||
| e33fdc93b4 | |||
| c58e6c129a | |||
| ddf9a3dfc7 | |||
| 2d372a990b | |||
| 56d5d160cd | |||
| 4ab7424597 | |||
| 875288b344 | |||
| 8cca875569 | |||
| f1fe18465d | |||
| 5c0cfc2cf0 | |||
| 4dd5cf2207 | |||
| 3723f30a18 | |||
| acaa2e6267 | |||
| fa661ce749 | |||
| 79ab881eb1 | |||
| b9109f2de1 | |||
| ceaab8dd22 | |||
| f9414f7553 | |||
| ccd26c2862 | |||
| 2a4b9e09c0 | |||
| 0e1869cc28 | |||
| 48a05026de | |||
| 12d0eb5f3e | |||
| 17a88d3192 | |||
| fb52143cf6 | |||
| 5f077a3445 | |||
| cd4e07a85e | |||
| 492b352ab6 | |||
| e645b9ab94 | |||
| 2b8b6c929e | |||
| 5bf9afbf35 | |||
| efae154929 | |||
| 2e4de76231 | |||
| ed4df85572 | |||
| 1b5820a565 | |||
| 3e5928c57d | |||
| 99207bd112 | |||
| 8ed47aa10b | |||
| 9ca485734a | |||
| b231a413f5 | |||
| 70f7423436 | |||
| d976ef262e | |||
| b42586ea56 | |||
| b43c78e5d3 | |||
| d4c2cb402d | |||
| 47a551d17b | |||
| f6d5046af1 | |||
| 88762a2f8c | |||
| d3ef14f931 | |||
| 7677936316 | |||
| 6449c494d0 | |||
| ec8717d5d8 | |||
| 751a1e0890 | |||
| ec62b7d953 | |||
| bf760c80b5 | |||
| 9d7d9b3ae0 | |||
| 2a3c88a659 | |||
| 4ac462bfb8 | |||
| 35fa0bbca0 | |||
| cc746a5020 | |||
| b11386e158 | |||
| 8b5d4003ab | |||
| 5c8e5b3709 | |||
| db2a3b2e01 | |||
| 5f8f2d849a | |||
| b41948f5cd | |||
| fb8f4277b2 | |||
| d489a6d3d5 | |||
| e4c07faf0a | |||
| 667003e447 | |||
| ed23f5909e | |||
| 3750b9b0b0 | |||
| 036c2c6b02 | |||
| 74872c19d3 | |||
| 0866669e75 | |||
| 6f82aea66b | |||
| 33b7532e69 | |||
| 56ee2560be | |||
| e2230ba77b | |||
| 3a5d1ea2a5 | |||
| 9c17256447 | |||
| 91487cbb8e | |||
| b5015a2a0f | |||
| fe5cb1a1c8 | |||
| aecaaf73a4 | |||
| 5e737018e1 | |||
| e444648a30 | |||
| 3cc2c2a150 | |||
| ef03ae874f | |||
| 96f57c9ccb | |||
| ec4cdfdd05 | |||
| 4402879ee4 | |||
| 6a17688021 | |||
| 1381b6d01d | |||
| 5acb4edf25 | |||
| 842588c12f | |||
| ac1a612179 | |||
| 07797c4da4 | |||
| a9aa7456ac | |||
| a801c7fd74 | |||
| 6458c0e268 | |||
| ea4e7a53fa | |||
| 937930dcae | |||
| bac1cc4dc1 | |||
| 003c477129 | |||
| 5ddd8d6531 | |||
| 8cc6807e89 | |||
| c589eae2b8 | |||
| a163c9ca5b | |||
| 1d69028989 | |||
| b86e42e0ac | |||
| 365d452d4d | |||
| 3e3e552125 | |||
| 3dea40b858 | |||
| 5139733623 | |||
| c9c385c522 | |||
| adab7f8332 | |||
| 8f7c1c7672 | |||
| 4c6b218056 | |||
| 50d1ce411f | |||
| 03d8527de0 | |||
| a34a9896ac | |||
| e19b978151 | |||
| 996f393a86 | |||
| 0f6969b7e9 | |||
| ab44630db2 | |||
| 2c1ebb8b50 | |||
| e6aeb0d3e8 | |||
| 95a26fcf2d | |||
| 89d795f180 | |||
| 35df911485 | |||
| f7677e1623 | |||
| 12e6afe900 | |||
| ef22ba4836 | |||
| 10d72390c0 | |||
| e0db6bbd65 | |||
| bd6e301832 | |||
| a086527727 | |||
| 9d2ce253de | |||
| 49296533ca | |||
| 271bedb485 | |||
| 865d4d595e | |||
| a3af8e86cb | |||
| eacea530c1 | |||
| fa2fbed3e5 | |||
| e708bb75bf | |||
| 49c06132df | |||
| cacb654c7f | |||
| 30a09f3827 | |||
| 14cb5b35fa | |||
| 6dc52c78d8 | |||
| ed5456daf4 | |||
| c76450e20c | |||
| 9907dc523a | |||
| efbc1c5a9d | |||
| 956c4c4eb4 | |||
| 48c3a70b4e | |||
| aa925a52fa | |||
| 5856999a9f | |||
| 07dd7c2fd8 | |||
| 8f1d047148 | |||
| 31eedff5a0 | |||
| 384f0eb2f9 | |||
| bf14ef75f1 | |||
| 5e7fe8b585 | |||
| 4c06893610 | |||
| 9de4afa897 | |||
| 42e8fbfc51 | |||
| 54065d68b8 | |||
| e28b7e2311 | |||
| 09b933f19d | |||
| 235777ccc9 | |||
| 9ddd3a6548 | |||
| c5aa114392 | |||
| ca4a3f4da9 | |||
| 24538df919 | |||
| a699525d25 | |||
| d9ece8233d | |||
| d39bf0ac2d | |||
| 590adb130b | |||
| 026a5d0888 | |||
| fa6113f9a0 | |||
| 757baee846 | |||
| a27c795908 | |||
| 31c799a0c9 | |||
| 8581a670e3 | |||
| 18d233d525 | |||
| 3e0f062106 | |||
| fc2a4c88ce | |||
| 55bda52555 | |||
| ad02c961c6 | |||
| 15550ce0d1 | |||
| 62427d0815 | |||
| 34706ba050 | |||
| edf9ac11d4 | |||
| b908f2e9dd | |||
| af2e6bf87c | |||
| 7defc6670f | |||
| 84894974bd | |||
| db0076a9df | |||
| 2d05480174 | |||
| 035678efdb | |||
| b9c9e05381 | |||
| 9535bf1977 | |||
| 7822cd38a0 | |||
| 448c467256 | |||
| c547f15a17 | |||
| 015f7812ed | |||
| ef46ccb05c | |||
| 94cb73c2d2 | |||
| a0eebdc404 | |||
| 7cb203fae4 | |||
| 9a687ebb77 | |||
| 839bfaedb2 | |||
| 2d184cb553 | |||
| ca13618681 | |||
| 1e51bb717c | |||
| 241759101e | |||
| 7d7fe4997f | |||
| 0a97f6312a | |||
| 15a121fec5 | |||
| 15d45211f7 | |||
| 8a017cbb5a | |||
| 4bf5042240 | |||
| e4512aab3b | |||
| 65be574aec | |||
| 31e67dd19f | |||
| 30e343862f | |||
| 56e8ef632f | |||
| ba6f6e44a8 | |||
| 9524956819 | |||
| 61d22f9cc7 | |||
| cd40cb8879 | |||
| 82601f4c1a | |||
| 39994051e4 | |||
| 051dcb2a07 | |||
| 41e8291217 | |||
| 3f42eb979f | |||
| 8fdb7997c6 | |||
| 4658896ee1 | |||
| bf64b8cf09 | |||
| 94b57bf796 | |||
| cffbb3d8ed | |||
| 5f50d619dd | |||
| 7751be7cee | |||
| ac7d5f67a2 | |||
| 336116d960 | |||
| b290c32e16 | |||
| 3487be75ef | |||
| 9d2f467bfb | |||
| 7b75aa9fa5 | |||
| 274d850d34 | |||
| 26dad0a9fa | |||
| 9ebb5b2a54 | |||
| 9e54efd004 | |||
| a8b798e6c4 | |||
| 242005d762 | |||
| 5940c73bbb | |||
| cf08830c28 | |||
| 8bf7312654 | |||
| c99fe0386b | |||
| 66113bd626 | |||
| 6669915b65 | |||
| 612fa1b10b | |||
| 2e57824374 | |||
| e7cfc1a313 | |||
| 0ae96ff8a7 | |||
| cafa6a9e29 | |||
| e4fd5e3999 | |||
| ebf80e2e70 | |||
| 026097b9ee | |||
| 0a6cbea0a5 | |||
| 99d1a69444 | |||
| 74ffc9ea6b | |||
| 96c78396ce | |||
| dca34695d0 | |||
| 877fc56410 | |||
| aad50151f3 | |||
| 25296b12aa | |||
| 9972562d33 | |||
| ff8ed52dd8 | |||
| 4c3be2e718 | |||
| 17ae0363db | |||
| a638e986f4 | |||
| fd2174664c | |||
| 79b1c6966b | |||
| 818463ee8e | |||
| 858b1d1e5a | |||
| 8e67573a64 | |||
| 6af3306a1d | |||
| 1cdd2ad2af | |||
| 5f4f6b65b3 | |||
| 7da051f135 | |||
| 14911e2e12 | |||
| 9e97c87539 | |||
| 4c5bd92183 | |||
| 5282b31df4 | |||
| 1e616c0af3 | |||
| abb1fa3f37 | |||
| 0ccbfd2868 | |||
| 2d8340a91f | |||
| d713cfc5eb | |||
| f3d44301cc | |||
| 27d55125e6 | |||
| e80be7f1d0 | |||
| 18db92dd9a | |||
| b8686174be | |||
| f39217a5ec | |||
| f54dc3f4d5 | |||
| 6b410bedfc | |||
| 8829ace4aa | |||
| 1851a64b6f | |||
| 443e5e34af | |||
| 60e1556a44 | |||
| fa9365eca5 | |||
| afe002b04c | |||
| 8b5e5ebcf9 | |||
| ab90353f1a | |||
| 452dd0e4d9 | |||
| 7f9193ef09 | |||
| 64070cbb88 | |||
| e73595bd64 | |||
| 2c77842887 | |||
| 6faca88ee0 | |||
| 211e130811 | |||
| 455c639093 | |||
| 8ba4c5885f | |||
| 847e7f3379 | |||
| d714dfeaa8 | |||
| d52b0e294a | |||
| 55adefe428 | |||
| 0ac6d0bf33 | |||
| c73c83b0e6 | |||
| 4a94c062a4 | |||
| c7d06b79ae | |||
| 9a0a8c1c6f | |||
| fa49b9afea | |||
| 180585741c | |||
| 52679fbc2e | |||
| b5c6d3d4c7 | |||
| 2fade302ac | |||
| 20c3b8cab4 | |||
| b3f272ffcb | |||
| 518f291eef | |||
| d7b3bf547c | |||
| db9d56c08a | |||
| 41750a6cff | |||
| 12bb7fe770 | |||
| 97a375484c | |||
| 4e817ff418 | |||
| 73d6a2f901 | |||
| 623ba0236d | |||
| f4078e0db6 | |||
| 03322b4261 | |||
| c811526004 | |||
| b0167632ce | |||
| c53cc018de | |||
| cbbb3c43c5 | |||
| 77b75d2c78 | |||
| 6ba254ee54 | |||
| a79a9e1241 | |||
| 8e093e5981 | |||
| 6af5a54c28 | |||
| 7c2a32ff88 | |||
| a946b6b51b | |||
| cb3c2212c7 | |||
| d698b87f20 | |||
| 13dd2acca4 | |||
| f16540fcba | |||
| 1dc9b3c784 | |||
| dd9d483d03 | |||
| eb5601b0a5 | |||
| 53f5ef6df5 | |||
| d32585a304 | |||
| 7d40901ce3 | |||
| b1ff0b2ae7 | |||
| 7f23af1684 | |||
| 03121deba3 | |||
| 15b9868f8b | |||
| 2c05b8a56c | |||
| c79b550dd0 | |||
| c4158a6314 | |||
| 857ccdb259 | |||
| a504cb49ec | |||
| 52c85f847a | |||
| a21d4fa410 | |||
| 827d6d6ef0 | |||
| 60a42ef1c0 | |||
| 88aecee6a2 | |||
| 73efa694e6 | |||
| e9d0bc027a | |||
| 8b63a01d95 | |||
| 1d4a35b396 | |||
| d13eca11e2 | |||
| b0c9fbb293 | |||
| c19727fd38 | |||
| edf0582c0b | |||
| 6d00033e97 | |||
| f399c00610 | |||
| f0c96fafd1 | |||
| 0cec4fab7d | |||
| 14cdeee75a | |||
| 16469fedbd | |||
| 80a1694514 | |||
| d486795158 | |||
| b1e2368b32 | |||
| baca8fa8e6 | |||
| 38f7461df3 | |||
| a5b249472e | |||
| dbd041243d | |||
| d22894dfd4 | |||
| c59b1e682d | |||
| 301bf8d1b4 | |||
| 01c37dcdb5 | |||
| 092cf881a5 | |||
| 352d5472b0 | |||
| 5ebd898953 | |||
| 7972a4019f | |||
| f8c1071c51 | |||
| 700ccf6e35 | |||
| b7cf9f43d2 | |||
| 551b450527 | |||
| cbad305ce6 | |||
| b169ac9c2b | |||
| 7a7fdf71f8 | |||
| f98d0ef2a2 | |||
| ce2298fb5f | |||
| 9384e5f6de | |||
| bc65afc4df | |||
| 31baeed614 | |||
| f8208fa456 | |||
| 6435b9f908 | |||
| 500aa12318 | |||
| a594ee9c84 | |||
| 83703cd077 | |||
| a1b3b4167e | |||
| 747907dc5e | |||
| 715aa5b135 | |||
| e344e3d402 | |||
| b0ad069517 | |||
| 80fa0f7812 | |||
| 05deb52dc1 | |||
| 0a4b1068e1 | |||
| 5aa8a278a3 | |||
| 11cc1e168b | |||
| 0a9d09b42a | |||
| 96ab75b8dd | |||
| e52d1258e0 | |||
| 0ac33ddd8d | |||
| 326e6ebae7 | |||
| 43eca3f878 | |||
| 6bec88ca42 | |||
| 769b60f935 | |||
| c4bcb01906 | |||
| 6903a987b8 | |||
| 760872dbde | |||
| 47e1334c0b | |||
| 529534dc2f | |||
| 261c4ff4e2 | |||
| 39a34cc375 | |||
| ea6dba2787 | |||
| 11c3257a18 | |||
| 36bffc81b3 | |||
| 2ee410560e | |||
| 1789c7daf1 | |||
| b809d2f073 | |||
| 4ab8ab4f50 | |||
| ac40eed1a5 | |||
| fd9995ebc5 | |||
| 5d912e7ed4 | |||
| 94eb68d742 | |||
| 243e687be6 | |||
| 3e4b4dd190 | |||
| c6acd246ec | |||
| d5d7d88612 | |||
| 8594dd80dd | |||
| 216e167ce6 | |||
| 1ac6a246d8 | |||
| e91692f4a3 | |||
| 8e287d507d | |||
| 81484b447b | |||
| 9f6349aba9 | |||
| ddb1ce7418 | |||
| f68d22850c | |||
| c50aa67bff | |||
| 1b10159950 | |||
| 390c128592 | |||
| ab5d06a094 | |||
| a4ee4da18a | |||
| 06dd597552 | |||
| 9de9ceb6c5 | |||
| b815edf69f | |||
| 8538ce9044 | |||
| c1a6252be1 | |||
| 50e15c825c | |||
| b38d552a92 | |||
| ae6834e028 | |||
| 0373b60c4c | |||
| 83d1fbcff6 | |||
| 55bcae7f25 | |||
| 42e1e3c67f | |||
| 57b0fab692 | |||
| a8d4dff0a1 | |||
| 4a5663568f | |||
| bbedb59675 | |||
| c2cf192943 | |||
| c82ef72158 | |||
| b48a1f08c1 | |||
| 99833a9cbf | |||
| ebceeeacda | |||
| a6c4ee27fd | |||
| e5c393dceb | |||
| 8deff3acf2 | |||
| 1f72865726 | |||
| cc598b312b | |||
| d38bbb225f | |||
| eff757f2e3 | |||
| a009d751c2 | |||
| 6f5a12a583 | |||
| 296252c49e | |||
| 75ec6c9e3a | |||
| 5b44e0a31b | |||
| 33ef7002e1 | |||
| f6a23d1911 | |||
| 601ac5b1dc | |||
| 17dceae7a1 | |||
| 00ea100e96 | |||
| b08259a120 | |||
| f4f4946836 | |||
| fa9af2468a | |||
| ff80b73157 | |||
| e2c05f06ef | |||
| 3ee431dd4c | |||
| 53fe733805 | |||
| c10decf7a0 | |||
| 63f4d8cad0 | |||
| 2b2a2f8df2 | |||
| 1a5aefc95c | |||
| 39371ee454 | |||
| 5ad2ea06af | |||
| b4fb94fe6d | |||
| e703e923ca | |||
| 1a6c546c6f | |||
| 311970546f | |||
| 7420a6a9cc | |||
| 022e8fab97 | |||
| 3c5c567507 | |||
| 9c683ef01e | |||
| ffcffebe85 | |||
| 010e0460b2 | |||
| ffa17fe322 | |||
| 83272a3853 | |||
| ccbe839ee0 | |||
| 3d76df3a12 | |||
| eaabaaf750 | |||
| f8823bad9a | |||
| d0c36a7b72 | |||
| fbc5bf10cf | |||
| b88bda6af3 | |||
| b31ef225cf | |||
| b4009cb001 | |||
| d3283490ef | |||
| e279a312d6 | |||
| 7372e62b2c | |||
| 471cce24b3 | |||
| e392ba6938 | |||
| a8e3336a85 | |||
| ec6766a363 | |||
| f7dcf8fcea | |||
| e25c4f4027 | |||
| 85b324bee5 | |||
| b7aa077a63 | |||
| f740177c87 | |||
| e52482909b | |||
| 28424906c2 | |||
| 18eec3a984 | |||
| cf72479bf1 | |||
| 634bf6cf7e | |||
| 265709f5cd | |||
| 115abd2166 | |||
| 95e00d0808 | |||
| 8becb73293 | |||
| ecfd336318 | |||
| 8eeefcb576 | |||
| bbf26c4e61 | |||
| 656e1386a2 | |||
| 0c44b11917 | |||
| e99af3b17b | |||
| 39db055268 | |||
| dedc7a8fdb | |||
| 676adf8625 | |||
| 11d8bcc9d7 | |||
| f049be7ad4 | |||
| 3bedfd3347 | |||
| b2c2c31c60 | |||
| 4e4403c9b4 | |||
| c44a17db1b | |||
| ad7233fc01 | |||
| cd21d8bc00 | |||
| 8d3e218ea6 | |||
| cec3cdda15 | |||
| f6d813aaaa | |||
| 939328111b | |||
| 29442d2edf | |||
| 20139b7c8d | |||
| cae334c43c | |||
| 4b1970bb4c | |||
| d6afbd323d | |||
| 292186a3e7 | |||
| efdb46b6e2 | |||
| ddb10c6447 | |||
| d7f98cd3ef | |||
| 38a555a83c | |||
| 2b60a26b46 | |||
| e41212c715 | |||
| 0f1bc0d68e | |||
| 930c9412b4 | |||
| e8f44af5bf | |||
| 2187c49f5c | |||
| bd3feddf67 | |||
| 68ef0a111f | |||
| b2c1a447fe | |||
| b2028cc26b | |||
| 4759176313 | |||
| 11573231c6 | |||
| de697935a2 | |||
| 3ddd2029bc | |||
| 879e1d3234 | |||
| af471ce5e8 | |||
| 5ea8ba67b4 | |||
| 3814e167d9 | |||
| 2bd79e23de | |||
| 8320feec09 | |||
| ab756f713c | |||
| 4f75d380a4 | |||
| c2ee3840ae | |||
| cc4c37952a | |||
| afea70c01c | |||
| 087465b943 | |||
| 6a82f774f2 | |||
| f1c71da115 | |||
| 6047f46b19 | |||
| c11160114a | |||
| 2e81b9d8d7 | |||
| 72768b6b9c | |||
| a4c75f1492 | |||
| 824e320d96 | |||
| c6cf925ff8 | |||
| 14e455b716 | |||
| f65f74bbce | |||
| 324292cfc7 | |||
| e43afb1bb8 | |||
| 5085df995f | |||
| 19a63d8245 | |||
| dc848c2994 | |||
| 6ad221daf3 | |||
| 735180aa14 | |||
| 6c61c0801e | |||
| 235616686a | |||
| 5bb00c817f | |||
| 601e424750 | |||
| 1b9e765b21 | |||
| db29ffc978 | |||
| ac303eae46 | |||
| bc9d5d917c | |||
| a332cc9f7f | |||
| 1ba21f96ca | |||
| d997ac7810 | |||
| 7351a8dbaf | |||
| 9b8ee8cea0 | |||
| ca1330f0b2 | |||
| 10989715d0 | |||
| cf06290565 | |||
| 374deef48d | |||
| a2c8e516c2 | |||
| ca2047bc35 | |||
| 41b437ea3a | |||
| a5751f7578 | |||
| 629aac92ec | |||
| d880a5fbde | |||
| 2acfe63964 | |||
| c62444da39 | |||
| 77e6775065 | |||
| 333affcb81 | |||
| 421216997b | |||
| 7a11e925cf | |||
| 5b3000d933 | |||
| aceb3fbaf4 | |||
| 7cba11fb9b | |||
| ff648221bd | |||
| c0d9dd3ba9 | |||
| d8e2b3c547 | |||
| d6de6423ba | |||
| 0e56dc3078 | |||
| 270dfa1c8e | |||
| 2661d80687 | |||
| 6a13448ad2 | |||
| e57533cca5 | |||
| 31f2437f07 | |||
| 5ca356a464 | |||
| f51ba059b9 | |||
| cbf8f5d32b | |||
| 525b6b1c54 | |||
| 3aca02efb3 | |||
| 5164ea91a7 | |||
| 49debe62fd | |||
| 847d370301 | |||
| eb3e6cb04f | |||
| 9050ffe035 | |||
| efb619235c | |||
| b12541c4dc | |||
| 3e624c64ca | |||
| b73dd1a0e4 | |||
| 4620caa864 | |||
| fbd02d4693 | |||
| b4a3a64744 | |||
| b29fed790b | |||
| 66c827656f | |||
| 314bdc7c14 | |||
| 575976144a | |||
| e03129ad44 | |||
| 08a70fb392 | |||
| 0ae91c80aa | |||
| d6df9a8ffe | |||
| c52716d46c | |||
| 73a0c25376 | |||
| ed37f9fa4f | |||
| 0416d437fb | |||
| db9279dedb | |||
| e58b3ec5df | |||
| 6ffe03a0a1 | |||
| 3e5da38dae | |||
| 9499a3778e | |||
| 9362eb4a07 | |||
| c8035e11e8 | |||
| 58fc8f97a3 | |||
| 857e0a0d3b | |||
| fa2aa699da | |||
| 146c521235 | |||
| b623ddc000 | |||
| 0001d05686 | |||
| 1741d740f2 | |||
| bbabbc1613 | |||
| 14d40584b2 | |||
| 1360dacaa3 | |||
| 810079de1f | |||
| c203509d5b | |||
| c36fdc88d4 | |||
| 7ac47bfe69 | |||
| be02176a4b | |||
| 8a2d9bc9ef | |||
| 012cbdb0f5 | |||
| 31acb8dc52 | |||
| 06a6cb6f36 | |||
| e33ed12c3b | |||
| 4220fd52b9 | |||
| c47394b0c9 | |||
| 4c91a3af94 | |||
| 4be01e5cbf | |||
| a355f4f0fc | |||
| d262a5d48e | |||
| 30624f7056 | |||
| 3f067f4409 | |||
| f564f93c84 | |||
| ff9e79ba3a | |||
| 07a79db505 | |||
| 4f338ed407 | |||
| 6fe1cc0874 | |||
| bdd3d0c76d | |||
| c440030e99 | |||
| 3b7f95a506 | |||
| 1bca97ec7f | |||
| 189113d891 | |||
| 76111a3d3a | |||
| a43c388abb | |||
| ec60e0ae7a | |||
| 6a143bf282 | |||
| 932eab943d | |||
| 256cbbc4a2 | |||
| 006097f8ad | |||
| 18f4b9274f | |||
| 71c8711970 | |||
| 7a89a3e493 | |||
| c4c4c9998a | |||
| 2529b2d37e | |||
| 61fef6e957 | |||
| 34de670dbe | |||
| 6701fb7859 | |||
| b1116fd673 | |||
| 96c4990165 | |||
| 470753bcf5 | |||
| 0c716ede8c | |||
| e9e6efdc45 | |||
| f631e01d2c | |||
| 5b396457e5 | |||
| 5c5af879b6 | |||
| b8da16f390 | |||
| ba28170717 | |||
| a088d75e51 | |||
| 4134100363 | |||
| b31f715019 | |||
| c0c7ec3458 | |||
| eec5ec8071 | |||
| 6b1558bad8 | |||
| f169957d0c | |||
| d3eb7d23a4 | |||
| 2c7749784c | |||
| 0e56b37e80 | |||
| 2fdc7f6ce8 | |||
| 13afb71208 | |||
| c0135194eb | |||
| b54ef78d0c | |||
| 6b1ff25084 | |||
| 298bed16a8 | |||
| 852e032ca6 | |||
| b5509abb36 | |||
| d6ef587a10 | |||
| e36bd94345 | |||
| 908fa43b54 | |||
| 8bcb37bfb8 | |||
| 6a37588041 | |||
| f4ff44a6d9 | |||
| f71157529e | |||
| aceb6a0907 | |||
| d762d4289c | |||
| 9495d38b0d | |||
| b370cc7e99 | |||
| f5516805c2 | |||
| 5bc99e7f33 | |||
| fdd61b1992 | |||
| 9cda3620b6 | |||
| 9df74b8bc4 | |||
| bb7c468520 | |||
| c913eb9c38 | |||
| e8ce63ff21 | |||
| 7a7ee28cb9 | |||
| 65e7c90a77 | |||
| f5b50c6b8e | |||
| ec16142ee5 | |||
| e645dcbb70 | |||
| e693cd1e87 | |||
| 4fc63151af | |||
| b90745c590 | |||
| 3716c3d8af | |||
| f9ec5ca90b | |||
| 4cd9c0971c | |||
| ee60840ee6 | |||
| 6a50d501ec | |||
| 65d74c4965 | |||
| a143d9479e | |||
| 286d1ec746 | |||
| 7984a70ee4 | |||
| 21d8b6a33e | |||
| 17c45c39ed | |||
| 8194df8e0c | |||
| 38f5fe9e02 | |||
| 105dcb4162 | |||
| 33eb8a165d | |||
| 869b66f6b3 | |||
| 129f0604ac | |||
| 0e84559d64 | |||
| 92487a1dc0 | |||
| c36416e53c | |||
| cafc4dfc7c | |||
| 34b4b5a9ed | |||
| 7df12d7bf8 | |||
| cc6775cdf5 | |||
| 94ff2d6ee8 | |||
| b5b3445c4f | |||
| fc38d4c86f | |||
| c749a543fa | |||
| 5211d333bb | |||
| 3e98f27e4a | |||
| 4452b44b90 | |||
| 53ce3854a1 | |||
| 564fd75d65 | |||
| 889d3bfdbb | |||
| 197d74f988 | |||
| ea8eba35e2 | |||
| e2a6445ebb | |||
| 9b3093311f | |||
| b662f0e625 | |||
| ab1238393c | |||
| 976e9afece | |||
| cbc5705541 | |||
| d490b5d500 | |||
| 2708b44ee9 | |||
| 1abd53b1aa | |||
| e676764241 | |||
| 59c23ad9c9 | |||
| 22b2b5790e | |||
| fb560dcb07 | |||
| 3f3fa7f7da | |||
| ffb93ec0cc | |||
| 20fc18fbda | |||
| 2ae98336d1 | |||
| 0dbddba6d2 | |||
| 29ab4b7f40 | |||
| c88ed74ccf | |||
| 5b2d4f2657 | |||
| fb4d8d0832 | |||
| 6083c1566e | |||
| 73028c5df0 | |||
| 81fb8d3251 | |||
| 4e69104a1f | |||
| 73d79d42b4 | |||
| 47b735f994 | |||
| 7d22fefd37 | |||
| 61a2b7dc9d | |||
| 6e261d3a22 | |||
| 4e597c8e4d | |||
| 925a13ced1 | |||
| 575a3b7aa1 | |||
| 4d36472b96 | |||
| 8514018300 | |||
| 1eec69a900 | |||
| 8744402f1e | |||
| 7f98edd7e3 | |||
| f1e8a51f08 | |||
| 0ed630f139 | |||
| ef74b0f07a | |||
| f54a5bd37f | |||
| 569897ce2c | |||
| 21da895013 | |||
| 9a70910d47 | |||
| 9274734a0d | |||
| 69f948461f | |||
| e0b6247cf7 | |||
| 5f2dd71d1b | |||
| 31158af57c | |||
| 5dd61fb9a9 | |||
| ee5de0ba44 | |||
| bed38d3afe | |||
| 498d06e914 | |||
| 3e3a9e2c01 | |||
| 1f5db9a13c | |||
| 95bac8dabb | |||
| ba498eac38 | |||
| 68ccc04ee6 | |||
| 539f601be7 | |||
| cfb7d108bd | |||
| b4691a438d | |||
| fc325e97cd | |||
| fd639e5be3 | |||
| 63a5399bc4 | |||
| 125a75a121 | |||
| 9c64d1da35 | |||
| bf99014c46 | |||
| 92e974196f | |||
| 6aa7973aec | |||
| 520e7f2119 | |||
| dd28830327 | |||
| db97930122 | |||
| 7046de2991 | |||
| 0d3aa3c04c | |||
| d8b43600fd | |||
| ee5a6856ca | |||
| 73368963b2 | |||
| d7dabfeff5 | |||
| 42f08e596f | |||
| 4f7bdb0958 | |||
| c6c5c3fd4e | |||
| 961c69776f | |||
| d311f87bca | |||
| 7d99e05f76 | |||
| 2c12464a20 | |||
| 6fc3d34abd | |||
| 7748cbbe7d | |||
| 432c12521e | |||
| c069932f5d | |||
| 33d3072e1c | |||
| eae8ee0389 | |||
| 6bb6a01765 | |||
| ada24def22 | |||
| 2184f87003 | |||
| e615269cb8 | |||
| 5f96ebc0be | |||
| 950c6a4f09 | |||
| d28b81dc29 | |||
| d1ab1fab1b | |||
| 9e5b549b4d | |||
| 25848a6094 | |||
| cbcb83f21d | |||
| 3bf5417258 | |||
| 1ebfeb7946 | |||
| 9c67196b83 | |||
| 90ab15cb7a | |||
| 9a50828b5c | |||
| 6c1b23554f | |||
| 239dd23f64 | |||
| 522c5b5533 | |||
| 9329e59700 | |||
| 2ba147ecff | |||
| 9773e5e0d9 | |||
| ddb6f9476b | |||
| 6636826f04 | |||
| 98dadc98e1 | |||
| d6fc34b459 | |||
| d426b58b9e | |||
| 1e82cd8457 | |||
| d18d47be67 | |||
| ff6f1492e8 | |||
| 7365f01d43 | |||
| 3a21d6da6b | |||
| 0aa40e9569 | |||
| 8036ceb7c5 | |||
| 6664ea943d | |||
| 5a6b138b00 | |||
| 7fe294bf07 | |||
| b85c59f997 | |||
| f9bc3f5771 | |||
| 0b13fb822a | |||
| 71a382319f | |||
| 01a14ebd8d | |||
| 9fa836a73f | |||
| b43cb09aaa | |||
| df27648bd9 | |||
| 93dccf527b | |||
| 90787fed81 | |||
| 73306d028b | |||
| ce2f4227ab | |||
| f0a4fc6cd6 | |||
| a5381495e6 | |||
| 83446a88d9 | |||
| 9fde13a3ac | |||
| e63a81dd25 | |||
| 217349016a | |||
| adb8c93134 | |||
| c69b082601 | |||
| ca1d66734d | |||
| 5e3c72842d | |||
| 0731fa1587 | |||
| a3998e76ae | |||
| b5625f131d | |||
| 44a5b4bbe7 | |||
| 7fc628d98e | |||
| 64ca855617 | |||
| 9d87eafd11 | |||
| a3b3638f6f | |||
| c96ca70f25 | |||
| 7b5eda32bb | |||
| c63d91dd1c | |||
| b2907cd06e | |||
| 2fec88ee02 | |||
| 7e03d2bd7c | |||
| 335dd5e68a | |||
| ea2600bd5f | |||
| 5c3d441ee1 | |||
| f5a236c3ca | |||
| 6b4c3ee234 | |||
| 79815bf666 | |||
| 5004d5af42 | |||
| 9ca21c838b | |||
| e0849a66ac | |||
| 6b081f04e6 | |||
| 0e31e06a75 | |||
| ea56d305be | |||
| d440e21f5b | |||
| 875c4ae48f | |||
| f09f42d4d3 | |||
| bac51fba3a | |||
| babd41e7fa | |||
| 974d083c7b | |||
| 983fef469c | |||
| 009fcb0ec1 | |||
| 11b13e94a3 | |||
| 1ce3fb5cc7 | |||
| 62f5804608 | |||
| 908230d261 | |||
| 24d5ad1dcc | |||
| 9ddf60b694 | |||
| 0e9899f451 | |||
| 48ac24020d | |||
| 7511f3dd89 | |||
| 980211a63a | |||
| 6bc966793a | |||
| db1a7f27a1 | |||
| b28020f590 | |||
| 3e1bc27e1b | |||
| f44ff574d3 | |||
| 264eb23912 | |||
| ccebcae75f | |||
| 92b3cb786d | |||
| cd656fb21a | |||
| 83fa8d9fb5 | |||
| 98edad418e | |||
| 96d21ad06b | |||
| 850795c487 | |||
| 1487b840d3 | |||
| bd0d3fd76e | |||
| dbeb7fb4e6 | |||
| cd77c750c5 | |||
| 3922a2497e | |||
| 00df3d4de0 | |||
| f81b6c95f2 | |||
| 632675ea88 | |||
| eaa6b9afc6 | |||
| 9bab9b83d2 | |||
| 64abd3e0aa | |||
| 837577256b | |||
| 90b7df444f | |||
| 119dc50e2a | |||
| 34a3c25a30 | |||
| 1a8e87be4e | |||
| b94cf7faac | |||
| 2eaa8b6e56 | |||
| 801aaa5508 | |||
| 56d4ba8ddb | |||
| c7f79815e7 | |||
| 15579e2d55 | |||
| 088fa7b759 | |||
| 073219b43f | |||
| 983c484fa2 | |||
| cefd51c50c | |||
| ca6ce3040d | |||
| 908cd5ea27 | |||
| 6e6c8c52ed | |||
| 23c6998bf4 | |||
| 65a89a8976 | |||
| 6d5049a24d | |||
| 23a2cea8cb | |||
| 99f9243de5 | |||
| 9d8fd2d40e | |||
| 6e2c28a14a | |||
| b8f43cb273 | |||
| 258ed2eaa8 | |||
| 50ee59578d | |||
| 1c9333584a | |||
| e25b6fe354 | |||
| 27c7b99015 | |||
| 99d4515572 | |||
| dc17f2a111 | |||
| 880854846b | |||
| d9fa1bad72 | |||
| a98b2ca8c0 | |||
| 83a41d39b3 | |||
| cd51893d37 | |||
| 248aeaa842 | |||
| c76c3cebed | |||
| eb59e9f705 | |||
| e184ad13cf | |||
| dfe012ad9d | |||
| c024ab98df | |||
| 9aeb0b9b8a | |||
| 715fa638a7 | |||
| 100e3b6f21 | |||
| 6c32d8bb95 | |||
| 760164d63b | |||
| 387217bd3e | |||
| 7d1bb7f256 | |||
| a1cb100460 | |||
| c11b6fd393 | |||
| 632682726f | |||
| 2b566c182e | |||
| 764f836d52 | |||
| d5831acb07 | |||
| ed6cd597cc | |||
| 5cb463a714 | |||
| afc24ea5d4 | |||
| 894812c652 | |||
| b20f11d4ca | |||
| 0304628590 | |||
| 1fc855e456 | |||
| 3c86b6f3c5 | |||
| b803b067bf | |||
| 896a0eb1fd | |||
| 0d6c17fc1b | |||
| a3085020ed | |||
| cf8a70bf68 | |||
| 6bb3edc300 | |||
| c6f682c1eb | |||
| 4d1c98c012 | |||
| 2f32dfd33b | |||
| e83d9f1c1d | |||
| ebba9e929d | |||
| 055e80cfad | |||
| b1e1a9f9b2 | |||
| fd8423321f | |||
| 0cd81fb99f | |||
| 90d3b787f6 | |||
| 84c0aa1868 | |||
| 331065e62d | |||
| 414e9e7122 | |||
| 3cdb38a7c0 | |||
| ebd45980a0 | |||
| 45634f87f8 | |||
| af1ee9e648 | |||
| 164c794eb3 | |||
| 801f2ac8c7 | |||
| bfec203d4e | |||
| f599623a99 | |||
| f26a353057 | |||
| 16ce15ed4b | |||
| f24232cd1b | |||
| 1b59b57b57 | |||
| 569da80ced | |||
| 43114b89ba | |||
| d6a677b14b | |||
| 27c1b656cc | |||
| 24df44d9c7 | |||
| 73be60c47b | |||
| 6806f8204e | |||
| 176d3b3079 | |||
| 9261c7f771 | |||
| 91d33c798b | |||
| 2926852f14 | |||
| e2810edc8f | |||
| c301faa92b | |||
| 81d6841b4b | |||
| dd4df80f0b | |||
| 1efc208ff3 | |||
| c45d0cf60f | |||
| bf89be77b9 | |||
| bf8d4bc674 | |||
| 74755c89b9 | |||
| 0ffc8eaf53 | |||
| f01b3e6680 | |||
| 78528742f1 | |||
| 12e0aa4368 | |||
| 80faf22b4a | |||
| d0e594f9db | |||
| 629b22adcf | |||
| 594ca6dead | |||
| 0df4e62da0 | |||
| f75bf05ce6 | |||
| 0d467fd6de | |||
| d8293e84f3 | |||
| 4d6c93e923 | |||
| 9b2badf3c9 | |||
| f78ebc22ad | |||
| bfe870be65 | |||
| 74ea432847 | |||
| 492bea9aa0 | |||
| e213900fa2 | |||
| 9f5f646442 | |||
| 9024b19994 | |||
| 3233b58ad4 | |||
| e6ec24fa88 | |||
| 599db139f9 | |||
| 835b76a46f | |||
| 7ead04ce14 | |||
| 1f82a5d910 | |||
| 8c67b529f6 | |||
| 7211541ade | |||
| 0f6017bee3 | |||
| 87c8fca9bc | |||
| 88def24c45 | |||
| 822f725a07 | |||
| fc84bd5254 | |||
| deff792bb6 | |||
| 9398058e19 | |||
| 90cda45e9e | |||
| 6bca56fdb0 | |||
| 365ccd0af2 | |||
| d039c679d2 | |||
| 7e0c5c731a | |||
| eeaa402cd4 | |||
| 7bb4271291 | |||
| 267587c258 | |||
| d891fd0ae0 | |||
| aeef4823ab | |||
| 0412f3d929 | |||
| 8742c95461 | |||
| 1240be3ed9 | |||
| b262577d17 | |||
| 83a2347952 | |||
| cea04a2443 | |||
| e1844d9a45 | |||
| 9fb7addd4d | |||
| 734d29b03d | |||
| 2818e50569 | |||
| 31c56f2e0b | |||
| 951ae99bea | |||
| 041eac2d6d | |||
| 3471ff0d35 | |||
| 18e5bdbec5 | |||
| f18ac4c28e | |||
| 359dc43837 | |||
| d98a384cb0 | |||
| 3e0cf49514 | |||
| 35d32308de | |||
| 81db12c3ba | |||
| 10724a8123 | |||
| a8d34e534e | |||
| e74c73a85d | |||
| e6c0019c80 | |||
| 495580dad1 | |||
| 71f94a8a1c | |||
| 81422c4e6d | |||
| 072750f4dc | |||
| 4621ad6f9d | |||
| a31d4a2971 | |||
| c8b0c1e551 | |||
| 4c09a96096 | |||
| 5565dcdd35 | |||
| 8a6881822a | |||
| 7a865821d9 | |||
| 70373a5f7c | |||
| c3783399db | |||
| d79e9c9a9a | |||
| d73eb552e8 | |||
| 9fcc532df6 | |||
| 76a1417f2a | |||
| 9fc8dcb2a0 | |||
| f2522869ea | |||
| 7cef764ec0 | |||
| 23dad8447c | |||
| d8e33dbd67 | |||
| 59b123bc50 | |||
| ba2378ced5 | |||
| e4e2a666c9 | |||
| 398bb03f98 | |||
| ce50305e5b | |||
| 1a948d7020 | |||
| 1c62e87b34 | |||
| d6eaf4e6d2 | |||
| 45841eaf7b | |||
| 0dddc1494d | |||
| 75a23d24af | |||
| 798b3b3899 | |||
| 8af25b1664 | |||
| 6b2200fc88 | |||
| c824d15aa1 | |||
| b6ea0f43ae | |||
| 5daca95ddd | |||
| 54abc67aec | |||
| 00204f2b4c | |||
| a3c5883f2c | |||
| daf8bebcdd | |||
| 345c23a60f | |||
| 7e98e211f0 | |||
| 6be7cdda66 | |||
| ced0a94204 | |||
| 067395d5c5 | |||
| 698f9e3d7a | |||
| c11b3e2926 | |||
| 2a34d5b71b | |||
| c9270086ea | |||
| 577a03664d | |||
| 7c6812645a | |||
| 939148b050 | |||
| 783a616999 | |||
| 80327a13ea | |||
| 654e051e2a | |||
| fa2ccbc081 | |||
| 2ab78325f0 | |||
| 631be27078 | |||
| b0f7db73cd | |||
| ea89bec185 | |||
| fd2f17a7a1 | |||
| 5eab3cf6bc | |||
| 7dce8dc7ac | |||
| eed46f38b7 | |||
| b1de7ae08a | |||
| 357db7098c | |||
| f9c5317db2 | |||
| 28e608a2c2 | |||
| 1efa0a7552 | |||
| d0c9fe277a | |||
| 5ca054757f | |||
| 9e80fc7b2f | |||
| 158e82e061 | |||
| 9d00f78f16 | |||
| b668a740ca | |||
| bc1715c1e0 | |||
| 36883c1192 | |||
| 6e5291a915 | |||
| fa84ae26d6 | |||
| 63e3827c6b | |||
| 645713e2cb | |||
| 73f6e9817c | |||
| 77676c27d2 | |||
| 344126fe58 | |||
| 5b7fb6a4a1 | |||
| 6f68d559ab | |||
| 1ab25c49d3 | |||
| b03872aae0 | |||
| 518ba748e0 | |||
| 18601c3b6e | |||
| 6e7102cfb3 | |||
| deceb00161 | |||
| eeb70cdd77 | |||
| ed9b84816e | |||
| f86ed23189 | |||
| cfa0380515 | |||
| 300ec3003c | |||
| 1c37746892 | |||
| 7e17f09fb5 | |||
| 8a2be93b4e | |||
| 562f864038 | |||
| 8618bf15d6 | |||
| 2fa8737c44 | |||
| f15f087143 | |||
| fae4d1c266 | |||
| b8e924e10d | |||
| 767bc3ca68 | |||
| 343c094f21 | |||
| 80caf79d07 | |||
| bb3bfa2d29 | |||
| 29cbab98f0 | |||
| a4c9338b83 | |||
| b670c26684 | |||
| b67fa1a8d2 | |||
| 286d5bb6b7 | |||
| 478e456e83 | |||
| 12726f8556 | |||
| ac1b449cc9 | |||
| 3e52915fa7 | |||
| 228f52867c | |||
| a80778f40e | |||
| 3df1d2d144 | |||
| a436574bfd | |||
| d0f8b9a978 | |||
| a557836a70 | |||
| 655fd06853 | |||
| e5812462fc | |||
| 4775ec354b | |||
| cb6d54bfda | |||
| f79a7dc661 | |||
| a241011057 | |||
| e37ca8e11a | |||
| ceae85ad60 | |||
| 71883b6ddc | |||
| 8d5a47c79b | |||
| 79e4a6a25c | |||
| bbaaec046c | |||
| 1c12ee0e55 | |||
| 65c75fc587 | |||
| fb393ad994 | |||
| 90debb9ff2 | |||
| b98ff88544 | |||
| 3a2c4e6f63 | |||
| 4e3f745ba4 | |||
| db0795b5d0 | |||
| 7f74084528 | |||
| c37815f130 | |||
| 73fcebf7ec | |||
| 59941c5d1f | |||
| 15dda5ea32 | |||
| 01ffc65e9b | |||
| 825697cad4 | |||
| 1fa93ca1ea | |||
| ca6bdb28f6 | |||
| 61d9ee45e3 | |||
| ff36e6d8d7 | |||
| e516a34a15 | |||
| 9d0d1cd339 | |||
| 15d897ff4a | |||
| f25e9b6f77 | |||
| a5a06a851e | |||
| 1718fb9e74 | |||
| 9a399ead25 | |||
| 3376adc051 | |||
| e4baa68ddb | |||
| 149dc376aa | |||
| 407093b3fa | |||
| c7be096c39 | |||
| a305067f2d | |||
| 3492a6ec17 | |||
| 33adab2b91 | |||
| a1f1dce0ae | |||
| 62c1fc3c1e | |||
| 284572efc0 | |||
| ed6ba93912 | |||
| 81a911cce5 | |||
| faef6f6191 | |||
| 5664327c24 | |||
| 3b29322d4c | |||
| fc624716aa | |||
| f516cf3956 | |||
| d72fa2a0f6 | |||
| bcc99fd92e | |||
| a26ce4dee1 | |||
| ec5d6c6a70 | |||
| fe9aab1055 | |||
| 5c5f67a256 | |||
| db90e12114 | |||
| d0724d0794 | |||
| 7711403bbd | |||
| 8bb166db5d | |||
| f09d999641 | |||
| dd7a958fd6 | |||
| d35405b7a3 | |||
| 3e89fca543 | |||
| 128cfdee9b | |||
| e778dd854d | |||
| 04b602f96f | |||
| 64a971a915 | |||
| 036831e279 | |||
| 41a13a6375 | |||
| 0c88c856d5 | |||
| 8efc6dd544 | |||
| a2978465a2 | |||
| 01b68be34f | |||
| 3d2096f516 | |||
| ca31abc6d6 | |||
| 8e5587fb79 | |||
| cce3089b65 | |||
| 641a8decdc | |||
| e347725d8c | |||
| 94c99db34c | |||
| 7ffa817390 | |||
| c5f35e61db | |||
| abc43ffbff | |||
| 8ac840ff87 | |||
| a0d386455b | |||
| ea636440d1 | |||
| a4df2e0113 | |||
| 77d397202b | |||
| bbc0c86f9b | |||
| 5e289f69bc | |||
| 2cff4bd8f3 | |||
| 55397dfb9b | |||
| b6938916ac | |||
| d303f84e7b | |||
| 2fde5a2489 | |||
| 2f1c745cde | |||
| 83bc5235cf | |||
| d7c62661a3 | |||
| f349826a57 | |||
| f061606277 | |||
| 805c21aeba | |||
| d000195ee6 | |||
| 3c6efd0ca3 | |||
| 3f5ccb183e | |||
| 3cb51299c3 | |||
| 18a879f475 | |||
| d803409215 | |||
| a468870fd2 | |||
| 855ff0e91d | |||
| d064009b72 | |||
| a701a0cee1 | |||
| 59a1aefb1c | |||
| 69f4f058fa | |||
| a648ff738c | |||
| 9ed09cb4a3 | |||
| d3549b66af | |||
| a096e2a88b | |||
| 71b4750517 | |||
| 43a4e1bbe4 | |||
| 46ccbb42fc | |||
| bbc707cf39 | |||
| 9c391277cc | |||
| 1bbdbacd5b | |||
| 955d7ecb57 | |||
| 031ad4eb37 | |||
| db0a9ee6e0 | |||
| a4d07b983a | |||
| d3418a94ff | |||
| 56e98ba81a | |||
| 8669598abd | |||
| 1b8613acb3 | |||
| 8e3b1c860f | |||
| f1971bf303 | |||
| cc0135134b | |||
| dc667ce1a7 | |||
| 7140363e09 | |||
| a52d56c8d9 | |||
| e92bcb7eb6 | |||
| cbb368ca06 | |||
| b6d4284b26 | |||
| a1faaf9962 | |||
| c7780700f5 | |||
| 76f0d99f02 | |||
| 8e9526b4b5 | |||
| 7bd11dda6f | |||
| c3248cf122 | |||
| f2ac50cb55 | |||
| 4cbdc7d910 | |||
| dd2add9f6e | |||
| df160af736 | |||
| 5b7b78e088 | |||
| 866d73ca26 | |||
| d461472948 | |||
| f24a228a93 | |||
| c8ed1c82c8 | |||
| 5c00e344c1 | |||
| 0b51532ce9 | |||
| 110394b2ba | |||
| 5a5c4349e8 | |||
| 8ade204098 | |||
| 47f0e3cfb7 | |||
| 8938b546bf | |||
| 1ca52567a4 | |||
| 28e64ad5a4 | |||
| be5bf7b81b | |||
| 80eacb8f16 | |||
| 33e72b08d5 | |||
| 9b312f9d41 | |||
| 40ed717232 | |||
| 7296f1010b | |||
| 5d67aa21ae | |||
| 3fd71c4431 | |||
| fe92755b99 | |||
| fbf5455a86 | |||
| f19dad61c7 | |||
| f69dbecc38 | |||
| 90df44f0aa | |||
| 707f9e9241 | |||
| 137e20a846 | |||
| d5712f7cac | |||
| 9c58b236ef | |||
| 413f41921b | |||
| 386a93f0f8 | |||
| 2d103546ef | |||
| 1748fdf657 | |||
| 36fc52a3b4 | |||
| 371c5ddfad | |||
| 5505cf7014 | |||
| 9cb97c0c0f | |||
| 95854c4a2f | |||
| d2100428d3 | |||
| 597ba7feb3 | |||
| 6a43dc9d7d | |||
| a09da4eeb0 | |||
| 57b5cb3eaa | |||
| c03c0dfd23 | |||
| 4f15e5a267 | |||
| 18e1f751f1 | |||
| 31e5b5ff22 | |||
| 3d57c51111 | |||
| c999a3e505 | |||
| 030faccb8d | |||
| 6709739a05 | |||
| 29570db25b | |||
| 2e2f9fed55 | |||
| c28273793e | |||
| 4c12860f7a | |||
| b040bff6df | |||
| fafd4c86ec | |||
| 6aa919469d | |||
| 89896fe04f | |||
| fdc05cd68f | |||
| 854ec5784e | |||
| 9a24e0cf76 | |||
| b72f9d340e | |||
| 51ae203290 | |||
| ec6fb25c21 | |||
| 418589244d | |||
| 58d75aa310 | |||
| 6a73382706 | |||
| dc4e9e5cb3 | |||
| 67a8be8e90 | |||
| 07bc8efbc3 | |||
| 63e36007ee | |||
| f2538c1274 | |||
| a5df980c5b | |||
| 40a39ab650 | |||
| 7c3a15ace9 | |||
| 981a5c8c17 | |||
| e6cff60b4c | |||
| 4b82c485de | |||
| 8ae1044f80 | |||
| aae74065df | |||
| a7d3794a29 | |||
| fe0f552e00 | |||
| 348e19aa21 | |||
| c2407fdd88 | |||
| f116cf599c | |||
| 6e61e06051 | |||
| 02110485b0 | |||
| e1d89cb24d | |||
| 0558c9cb9b | |||
| 81babb227e | |||
| 31a3a73ee3 | |||
| 7c1697562a | |||
| b81ab431f2 | |||
| 2d8559731a | |||
| 72c36b9ea2 | |||
| e57d00ee10 | |||
| ecabbf6d28 | |||
| 608a8f5b56 | |||
| df3961121f | |||
| 1d18930462 | |||
| f7eba09007 | |||
| 2a64107e44 | |||
| c0707a85d2 | |||
| ade3cdf5ad | |||
| 076602bdc4 | |||
| 5909f71028 | |||
| a1994a71ee | |||
| 3a9a9f7861 | |||
| 693606a75c | |||
| c0443df593 | |||
| 2403a66598 | |||
| 4d18199902 | |||
| 9f75565ea8 | |||
| 4735c2af07 | |||
| ba089c780b | |||
| 9660ba1cbd | |||
| 1c71ecc880 | |||
| 07f4cd73f6 | |||
| 5c877fe94a | |||
| 79526f82f5 | |||
| 9626e0458c | |||
| 2d73591a18 | |||
| 0eb973b0d9 | |||
| a03fcf570d | |||
| f71b1bb05a | |||
| 8e651f56b7 | |||
| 808bb8da7e | |||
| b016dd16c9 | |||
| 2a4ef098d6 | |||
| 00c4e39581 | |||
| 169fea6855 | |||
| 3520be7824 | |||
| 0cb163865a | |||
| 2670b0d682 | |||
| 35401fe50f | |||
| e4679cddce | |||
| 1d87b37d10 | |||
| 4cb9b60558 | |||
| 5482822a2b | |||
| fc1bb1f867 | |||
| 21451ec6ba | |||
| f230d91b43 | |||
| d0383e4daf | |||
| e9217da5ff | |||
| 9ecd83dace | |||
| 35ff345fc9 | |||
| 552c44a9b1 | |||
| ee53de7aac | |||
| f8fb4335c9 | |||
| bebaa14039 | |||
| 18fb93530b | |||
| 2d5d86e037 | |||
| af077b15e2 | |||
| 3268ebd229 | |||
| 6c5297a423 | |||
| 9200a759d7 | |||
| 1f179f095f | |||
| 1eaf44e713 | |||
| 71e4693f08 | |||
| f9f395b21c | |||
| 75a97af6bc | |||
| 8b388827b5 | |||
| d425a4d60b | |||
| 1eb89ddf73 | |||
| 7f998b1b83 | |||
| fb0d2f1da1 | |||
| 3ba417e1a8 | |||
| ce158a076f | |||
| 7a03519975 | |||
| 96fa9a8a70 | |||
| 33508ae310 | |||
| f7e4a7cdfa | |||
| a7ca6d738b | |||
| cca75e7884 | |||
| bf119c0568 | |||
| ff98b041da | |||
| 9ddc3f1a12 | |||
| 5bfcd0485e | |||
| cae641ff26 | |||
| 254ebb979c | |||
| ecb923da9c | |||
| 40255ab002 | |||
| e4fbf3e2cc | |||
| de276de1c1 | |||
| 7edb51f3a5 | |||
| c835bc85c2 | |||
| 285b1241e3 | |||
| 8101924a68 | |||
| 48cbf267c9 | |||
| f434bfc623 | |||
| 96e83506d1 | |||
| 3b48806f75 | |||
| 0cb2c90890 | |||
| 1efb2ae7fc | |||
| a59fdd1627 | |||
| 893d0d64fe | |||
| f42816e7fc | |||
| f10b925015 | |||
| 75904dae66 | |||
| 7fd54b55a3 | |||
| b0eaff36e6 | |||
| 611961ade7 | |||
| afc7dcd94d | |||
| 61399e5afe | |||
| ffc2935405 | |||
| 9f693a0c48 | |||
| 61a12f790d | |||
| ef47b2c03a | |||
| 7ea12db3f5 | |||
| 08c6e456a3 | |||
| 6c9c131780 | |||
| 7ffe47c888 | |||
| 4f2164e40e | |||
| 821de121e8 | |||
| 7469d03b1c | |||
| 0b51fba20b | |||
| 34a83faabe | |||
| d5faa74cd6 | |||
| 0b77d66a6d | |||
| 83b1e6ac9e | |||
| 572c24cfa2 | |||
| f19a78a634 | |||
| d100ad99c0 | |||
| 66fc8d25a5 | |||
| fbaf05bd92 | |||
| e85855f2c4 | |||
| b3d834ae11 | |||
| f3776df0f3 | |||
| 5ab93083e4 | |||
| c356290c8d | |||
| 76c0bc06d5 | |||
| b90791e950 | |||
| b0ee7c7df3 | |||
| ecf15ebf3b | |||
| 4a666885b5 | |||
| adb5c79ff2 | |||
| 2421e54f8c | |||
| 41aa0e8003 | |||
| 1ab8dc44b3 | |||
| f0d22b6363 | |||
| 1e9ac5a7cf | |||
| 0b84b9fd8a | |||
| f671997ef7 | |||
| bd41e8292a | |||
| d49c43ff78 | |||
| 91caf2462c | |||
| 49a69d5b78 | |||
| 96e7ee7238 | |||
| 8da47b078d | |||
| 8c276b9c92 | |||
| 3c28a2daac | |||
| a36f981d1b | |||
| 5afca00b47 | |||
| 49108288ba | |||
| 5340d1f21f | |||
| 10bd1ddb39 | |||
| d5478b939d | |||
| 07ab8d7af6 | |||
| d474022639 | |||
| bcd8dc6b48 | |||
| 73fe2e7385 | |||
| 3e7656f7ac | |||
| abd397e954 | |||
| d75d49a51d | |||
| d5910b312f | |||
| 289cf4d2b7 | |||
| cb7b77a8a2 | |||
| 84a0b522cf | |||
| c4336ecbbd | |||
| d52e98ff9a | |||
| 71f71ddb3e | |||
| b5d884d25c | |||
| 7fd1d42a01 | |||
| 21637d4924 | |||
| de2696f68e | |||
| 88b317739f | |||
| 45d767297a | |||
| 361620954a | |||
| cc7968227e | |||
| ce02550d50 | |||
| cf26a0c85e | |||
| 44b82c777f | |||
| ee4647bd5c | |||
| 7c6000e412 | |||
| 668aac45d2 | |||
| 8742baa531 | |||
| cf62bdc962 | |||
| b632145273 | |||
| ae98d45991 | |||
| f2f329408d | |||
| bdfe21ab24 | |||
| c536c2a480 | |||
| f873b55e43 | |||
| c9cb7f8a0f | |||
| b18509c208 | |||
| 7bddbf5961 | |||
| f6f382532b | |||
| d9daad98c7 | |||
| 9d5c49546f | |||
| 16263f9685 | |||
| abb23a78ba | |||
| 4374eaea78 | |||
| 70d99980de | |||
| c110c41fdb | |||
| 6637a77f80 | |||
| 0d07a23c04 | |||
| c987545592 | |||
| 4f3a54bfc8 | |||
| c4403006b8 | |||
| b21402fc86 | |||
| c14a22272f | |||
| 870320a24e | |||
| 25a31953e8 | |||
| ce9eade29c | |||
| 5680a11063 | |||
| 1e5b31c388 | |||
| ee20201d33 | |||
| e3ea5d1d8d | |||
| fedac786d4 | |||
| 67b422662c | |||
| 1b92564330 | |||
| 12290c0d5c | |||
| 139affaa8d | |||
| 91ccbae788 | |||
| c0c2088333 | |||
| 8e5d84fcc1 | |||
| 0669c1fcd1 | |||
| 5d3b8daad2 | |||
| aa92a184d2 | |||
| 07bf43074f | |||
| fa963ecc59 | |||
| c8eb8157b8 | |||
| 99f750d64e | |||
| 7485caefb0 | |||
| afaa335851 | |||
| 176cd1ce1b | |||
| 041a901f32 | |||
| e0e55bc550 | |||
| c3ba645237 | |||
| a5a8a6175f | |||
| a7dafe2f41 | |||
| 9f374c8252 | |||
| 72e506b22e | |||
| ea52f82455 | |||
| 26db31e0c0 | |||
| 6f70bb8c69 | |||
| 05d4232f63 | |||
| aac3551407 | |||
| 2cf3447e0a | |||
| 0cdfcca24b | |||
| e70cdf083d | |||
| 454455c695 | |||
| 3de31f8d28 | |||
| da06afafc8 | |||
| 2e2c0375c3 | |||
| e7cf2ccd15 | |||
| e631383d4f | |||
| f21dfe36ba | |||
| 22333945fb | |||
| 337802783f | |||
| 4193aa9f81 | |||
| f3386d9383 | |||
| 56c84863a1 | |||
| 0b3d45eb64 | |||
| 3916b334a8 | |||
| 44455eb5b6 | |||
| 33753d9139 | |||
| d32ce2c8df | |||
| d08a338c3b | |||
| 0477b307c7 | |||
| f9abf73e31 | |||
| 26858f27cb | |||
| 035fea5315 | |||
| 694d4fcbb6 | |||
| 3e20c2e871 | |||
| f12e4d8da7 | |||
| fb6c70a91d | |||
| e44b939e71 | |||
| 6e72fd094c | |||
| 14b3aa3b3c | |||
| ca99a2d500 | |||
| 7da3ef24cd | |||
| 74ce8de7d8 | |||
| 05db5bc1af | |||
| 9629e2c676 | |||
| 5b322a36db | |||
| 1a237d7f42 | |||
| df99f8c5a1 | |||
| 0be9ae7b3e | |||
| be7f2aacce | |||
| 8f8d69716a | |||
| 2276bf69b7 | |||
| d7929899da | |||
| a67e747889 | |||
| e18f786cd5 | |||
| 022525b003 | |||
| 7627dde1f8 | |||
| 74d0bcb6ff | |||
| 155c782a2c | |||
| 2aef2f0bbc | |||
| 2f17464266 | |||
| 9d2398fd99 | |||
| 70d97ddd60 | |||
| 872403be1c | |||
| dd6b2e05e1 | |||
| d409aca326 | |||
| 7246d3c2f9 | |||
| 2e31176557 | |||
| 8aba81a0b6 | |||
| 94e55253ae | |||
| 2b07b9e5ee | |||
| 1806eabf59 | |||
| 1c7253cc5f | |||
| b5d330d118 | |||
| 90f6e73a35 | |||
| ef99852961 | |||
| 7a9aae1044 | |||
| 268d4f2099 | |||
| b4fcd59a5a | |||
| 15e53c4e87 | |||
| f03c0c1423 | |||
| 4321c54125 | |||
| 727a79b305 | |||
| cd286c2145 | |||
| 28d0ba35d7 | |||
| 8fda532c3c | |||
| ba10065c4b | |||
| 070dcf1c02 | |||
| 1c542df7e5 | |||
| 2f3a421018 | |||
| d5319793c4 | |||
| 27e015bd54 | |||
| 13d9135fa5 | |||
| 076a207935 | |||
| 73f2c342f5 | |||
| 3835e1e651 | |||
| f88c104d8f | |||
| 30968d70af | |||
| de890ae67d | |||
| d7d36181fd | |||
| 151e4ab4e7 | |||
| 88e5bef58f | |||
| 568c0ffb7e | |||
| 7daacf00df | |||
| a44f112fb9 | |||
| 60a5babd57 | |||
| 124409d075 | |||
| e99071f105 | |||
| dfb61caf77 | |||
| ba973342e3 | |||
| 8df7dfd2a7 | |||
| 237fad339c | |||
| f1e4db2aa8 | |||
| d7906165a3 | |||
| d2e2577dd3 | |||
| 00337e9687 | |||
| 9eddf44b7a | |||
| 8e11de0e86 | |||
| 68f7064a3e | |||
| 8d6b9d717c | |||
| c8f2712199 | |||
| 89d6272898 | |||
| b340a910ed | |||
| f02805da6f | |||
| 1d4d070256 | |||
| 1724cee8c4 | |||
| 9b45d0f878 | |||
| 9a3b173cd3 | |||
| ad90868627 | |||
| e5b1048bae | |||
| 8a62835577 | |||
| 93d2fff071 | |||
| 1a2b40cb53 | |||
| be36cf92fb | |||
| 2a5663c280 | |||
| f96ce1c241 | |||
| 3c1b6f594e | |||
| 0e4cc050d6 | |||
| ac29353abe | |||
| fa735208c9 | |||
| c7058d8224 | |||
| 22838f19fd | |||
| 7f84fc571a | |||
| 04c69db399 | |||
| 5c6a19a94a | |||
| 3df4367244 | |||
| 6d73c92cae | |||
| 36174696cc | |||
| 228cdd6a6e | |||
| 3cf2020c6b | |||
| a88a0e4413 | |||
| 3f07cd419c | |||
| 55fbfea369 | |||
| cef2a8f900 | |||
| 328a86d2af | |||
| 7f4226f9e6 | |||
| 070507df1f | |||
| da10de8466 | |||
| 3b0d2fa30e | |||
| 9c1bdb5b61 | |||
| 842f3bf049 | |||
| 098a89f312 | |||
| dfce409691 | |||
| 079bfb32fb | |||
| 438f2730a0 | |||
| 4c3ac4a7d8 | |||
| 932543f77e | |||
| a67413ccc8 | |||
| cb26b035c6 | |||
| b915ba9dfe | |||
| dc580dd4c7 | |||
| f873a3edb2 | |||
| d36680df54 | |||
| ec276d6aba | |||
| 6e011690a9 | |||
| beaf66b1f3 | |||
| bab6ad01aa | |||
| ae1d03fc51 | |||
| 4e5f88b74f | |||
| b92d68421d | |||
| 66085a1321 | |||
| b82bfbd0c3 | |||
| 5b6cafb11b | |||
| 8ad5c591cd | |||
| bd847ce7d7 | |||
| 6e85bccafc | |||
| fbcc5ff9fb | |||
| 69eba0ab19 | |||
| bc3e57d551 | |||
| ef1b8b2ae5 | |||
| e16d46843a | |||
| 7d709e55ed | |||
| 44286b94d3 | |||
| 1cfd974868 | |||
| 777faa8ae7 | |||
| b8c9ea0010 | |||
| abd7110e21 | |||
| 4d456542e9 | |||
| 0e64fec1ab | |||
| 3a52b65795 | |||
| 86a630702d | |||
| 3775550c4b | |||
| bf2c36a920 | |||
| a2c8c8ef00 | |||
| 82f6abd98a | |||
| 7dd29ed2f1 | |||
| 8efc0ec91a | |||
| 0919389d9a | |||
| fd97761c5a | |||
| ecd15667f3 | |||
| 56e2ee4ead | |||
| 8cd56e3036 | |||
| 578d23e061 | |||
| 47a06d88a0 | |||
| bfb9b540d4 | |||
| c1bc709c35 | |||
| 87d60b6e19 | |||
| 638fe7f5a4 | |||
| 4e0f24348f | |||
| 624a5644cc | |||
| 9b71fc9a18 | |||
| 95ec1d08be | |||
| e4e0ee14bd | |||
| a424892fab | |||
| 33c01368b1 | |||
| c544194611 | |||
| 0752069617 | |||
| c5a94a6100 | |||
| 488a664151 | |||
| 4c81960b9b | |||
| 6d6c326737 | |||
| 0d81fc853e | |||
| 19e9964780 | |||
| 1aec940587 | |||
| 22e1af6859 | |||
| 260ac7d9a8 | |||
| be916cb3fb | |||
| 5875aaf762 | |||
| 40f14ff545 | |||
| e703e4dfe1 | |||
| 898ce064f8 | |||
| d147671c6c | |||
| 2c1d5564ad | |||
| 08bd8f9f39 | |||
| 8aa3b753bd | |||
| 621e7a2529 | |||
| c55badcee0 | |||
| 788e632622 | |||
| 0f9ebb0b43 | |||
| 66adb71734 | |||
| 5ff9cd158a | |||
| 7f5367e0b1 | |||
| e1d4179b64 | |||
| 383ef96747 | |||
| 5adb39e757 | |||
| 99b189df6d | |||
| 3e9420add1 | |||
| cde42c4354 | |||
| 74c5035808 | |||
| fe25eefc15 | |||
| 412793275d | |||
| 447fffb21f | |||
| 80889a0226 | |||
| 4e6a55751a | |||
| f62f992cf7 | |||
| 67d10960ae | |||
| d9d387afce | |||
| b7141a1bc6 | |||
| bfbe68f035 | |||
| 0ef9bc923a | |||
| 49cba6e543 | |||
| 0993586758 | |||
| 376e65a674 | |||
| 86f23a1944 | |||
| 5a8c6e771a | |||
| e76d71521c | |||
| d844db4005 | |||
| a701c9b321 | |||
| b3261e7ace | |||
| d889e0b71b | |||
| f8e98d6779 | |||
| 3ddce1d74c | |||
| 4428aefc63 | |||
| 3b43b01872 | |||
| 4b8f3e8f32 | |||
| 18a3cef7d5 | |||
| 1f5d9513d8 | |||
| 0f9fc4fbde | |||
| 700331b5ec | |||
| 573dde9b44 | |||
| 5f25a5f367 | |||
| f382a8decd | |||
| 639f4b7190 | |||
| d4e7934ac3 | |||
| 1e68c28670 | |||
| 2a4fef837a | |||
| 751e246087 | |||
| fa218e648a | |||
| c9e8c51946 | |||
| da26bae61b | |||
| 3e1cd8241e | |||
| 81ee29ee8d | |||
| bb04edb45b | |||
| d7092d592c | |||
| 51261167b4 | |||
| 17177e7379 | |||
| 6596e3d566 | |||
| 4bc4601192 | |||
| 177a721205 | |||
| df85a0ff0b | |||
| 9ca788b2e8 | |||
| a5997dd81a | |||
| edfc8f8225 | |||
| 09cfd12235 | |||
| 43a237f15e | |||
| 877ef2c6ca | |||
| 851ef592c5 | |||
| 036483fae5 | |||
| 9c2e0a4acf | |||
| 7fe98d8c18 | |||
| 89f86f9661 | |||
| e17ea08e24 | |||
| 2431fea98a | |||
| d9e60f4f0d | |||
| e84470ef81 | |||
| 07d055f849 | |||
| 48b438ff2a | |||
| 69629c4f0f | |||
| bf34a252b8 | |||
| 528d3f327b | |||
| 56301bd9e8 | |||
| d6c5469712 | |||
| 54a31f50fb | |||
| c19b8e4ae0 | |||
| 6dce6dda1b | |||
| c56d921dda | |||
| 1c5079952f | |||
| 58b302caf3 | |||
| 439fac723a | |||
| 23b7138ab4 | |||
| 5ce8d29abe | |||
| d688af19e5 | |||
| 45dc04f33d | |||
| 770b15b58c | |||
| 248314772f | |||
| 03c2c762a6 | |||
| 3edfa1d6aa | |||
| f4d41fe33e | |||
| 61ed889005 | |||
| 8abfee9ec3 | |||
| 82628b0fc9 | |||
| 0700983090 | |||
| 75feacf172 | |||
| 15a2fc88a6 | |||
| cd6a59d5c1 | |||
| 45de313a9e | |||
| ade05b6cef | |||
| e9c09052a4 | |||
| 8fcc6507ce | |||
| 6e3e1c959e | |||
| 7ce83b4931 | |||
| 9f81f1cba8 | |||
| 7afd00a661 | |||
| a0dcefa382 | |||
| 31adbb247c | |||
| dda1adad6d | |||
| 0053c0e052 | |||
| bd5363cc83 | |||
| dc89441167 | |||
| 320b7a7e01 | |||
| 386e86e222 | |||
| 4446c02b8a | |||
| 1615360c71 | |||
| 6dc6c716c5 | |||
| 904158ac4d | |||
| 0f65d8cbbe | |||
| 1dea291a02 | |||
| f3e0218fbb | |||
| 78ef1a9930 | |||
| 6c1d0bc066 | |||
| 0820bb0555 | |||
| f5891c3821 | |||
| 764a7923ec | |||
| bb464289ce | |||
| 92c0f2fb90 | |||
| 9e136ff57c | |||
| 7bddb45a6f | |||
| dbed1c5d94 | |||
| b3cfd97946 | |||
| 81a1e12469 | |||
| d3f24dfad7 | |||
| ecc4f1bdfa | |||
| c2c2ca0fdb | |||
| 1569610f2d | |||
| e1b2949ae6 | |||
| 899883644f | |||
| e2ae9c0b73 | |||
| aebd83230f | |||
| 651bfb7ad5 | |||
| 5ed50a93fb | |||
| cc412edd42 | |||
| 2f259b228e | |||
| 7c789c337d | |||
| 7af0777910 | |||
| c1689ac301 | |||
| 4a790c40b1 | |||
| 6be46a6e64 | |||
| 5f07d8f11a | |||
| 35071007cb | |||
| f1f23ad171 | |||
| 2a91f6071f | |||
| c51e533a5f | |||
| a76c3f9cb0 | |||
| bb9c5ead54 | |||
| a12ab0a8db | |||
| 4d6dfbd376 | |||
| 23edebc079 | |||
| cbfcfce205 | |||
| 19e4ebbe3f | |||
| 594202a934 | |||
| 38084507c4 | |||
| 9ffda216ec | |||
| b5d73976ad | |||
| 22e7c4edaf | |||
| 2195c0d5f9 | |||
| ebb32261b1 | |||
| d51b589404 | |||
| 63ed224b7c | |||
| a95158518d | |||
| d73957899a | |||
| cd69bc9c87 | |||
| 391db836ab | |||
| 963529e29b | |||
| f7978f70ec | |||
| 1e4a191366 | |||
| c50783e388 | |||
| 6971556ab8 | |||
| b350662955 | |||
| f5bcde0b2f | |||
| 5c3b32d44d | |||
| 2dc8cb8734 | |||
| 0a4ed7192e | |||
| ae50ad91ea | |||
| 60f791631b | |||
| a6a6d9e638 | |||
| d8b641c839 | |||
| c6acbdd50a | |||
| df7cd9e4e4 | |||
| 6a17b3c51b | |||
| 04e9a6f512 | |||
| 9478590630 | |||
| 795b3e76ff | |||
| e31a472801 | |||
| 4f2b6579bf | |||
| ca559826c4 | |||
| d2de5b9d8c | |||
| d83d295763 | |||
| f6de000305 | |||
| 15749bfc10 | |||
| da2e47ad15 | |||
| 528c288fa9 | |||
| 702f589848 | |||
| 22d2fded2c | |||
| fc9faa8a47 | |||
| ecfddc6034 | |||
| 93f0c5fc72 | |||
| 6c3b131516 | |||
| f83b35b77d | |||
| 4e63c90720 | |||
| 7e957237e4 | |||
| 302a4813a5 | |||
| f71a4577b8 | |||
| a3e0dbba95 | |||
| 0f92f76ca3 | |||
| 4094958df2 | |||
| 7d8b395afa | |||
| 927904bc91 | |||
| 294edfd83d | |||
| de5e4864cb | |||
| e4e35296fb | |||
| a9f24a16bc | |||
| 4b543c3007 | |||
| 2e6797cc7d | |||
| f0340eccf9 | |||
| ec94f4e0f8 |
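For reference, an equivalent commit listing can be regenerated from a local clone of the mirror. This is only a sketch: `BASE` and `HEAD` are placeholders for the two refs of the compare view, which are not recorded on this page.

```bash
# Sketch: BASE and HEAD are placeholder refs; substitute the endpoints of the compare range
git clone https://github.com/huggingface/transformers.git
cd transformers
git log --oneline BASE..HEAD
```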
.circleci/config.yml

@@ -1,88 +1,124 @@
version: 2
jobs:
    build_py3_torch_and_tf:
    run_tests_torch_and_tf:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
            - image: circleci/python:3.6
        environment:
            OMP_NUM_THREADS: 1
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install torch
            - run: sudo pip install tensorflow==2.0.0-rc0
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: sudo pip install tensorboardX scikit-learn
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: sudo pip install .[sklearn,tf-cpu,torch,testing]
            - run: sudo pip install codecov pytest-cov
            - run: python -m pytest -n 8 --dist=loadfile -s ./tests/ --cov | tee output.txt
            - run: codecov
    build_py3_torch:
            - store_artifacts:
                path: ~/transformers/output.txt
                destination: test_output.txt
    run_tests_torch:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
            - image: circleci/python:3.7
        environment:
            OMP_NUM_THREADS: 1
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install torch
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: sudo pip install tensorboardX scikit-learn
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: python -m pytest -sv ./examples/
            - run: codecov
    build_py3_tf:
            - run: sudo pip install .[sklearn,torch,testing]
            - run: python -m pytest -n 8 --dist=loadfile -s ./tests/ | tee output.txt
            - store_artifacts:
                path: ~/transformers/output.txt
                destination: test_output.txt

    run_tests_tf:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
            - image: circleci/python:3.7
        environment:
            OMP_NUM_THREADS: 1
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install tensorflow==2.0.0-rc0
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: sudo pip install tensorboardX scikit-learn
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
    build_py2_torch:
            - run: sudo pip install .[sklearn,tf-cpu,testing]
            - run: python -m pytest -n 8 --dist=loadfile -s ./tests/ | tee output.txt
            - store_artifacts:
                path: ~/transformers/output.txt
                destination: test_output.txt
    run_tests_custom_tokenizers:
        working_directory: ~/transformers
        resource_class: large
        parallelism: 1
        docker:
            - image: circleci/python:2.7
            - image: circleci/python:3.6
        environment:
            RUN_CUSTOM_TOKENIZERS: yes
        steps:
            - checkout
            - run: sudo pip install torch
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
    build_py2_tf:
            - run: sudo pip install .[mecab,testing]
            - run: python -m pytest -sv ./tests/test_tokenization_bert_japanese.py
    run_examples_torch:
        working_directory: ~/transformers
        resource_class: large
        parallelism: 1
        docker:
            - image: circleci/python:2.7
            - image: circleci/python:3.6
        environment:
            OMP_NUM_THREADS: 1
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install tensorflow==2.0.0-rc0
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
            - run: sudo pip install .[sklearn,torch,testing]
            - run: sudo pip install -r examples/requirements.txt
            - run: python -m pytest -n 8 --dist=loadfile -s ./examples/ | tee output.txt
            - store_artifacts:
                path: ~/transformers/output.txt
                destination: test_output.txt
    build_doc:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.6
        steps:
            - checkout
            - run: sudo pip install .[tf,torch,docs]
            - run: cd docs && make html SPHINXOPTS="-W"
            - store_artifacts:
                path: ./docs/_build
    deploy_doc:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
            - image: circleci/python:3.6
        steps:
            - add_ssh_keys:
                fingerprints:
                    - "5b:7a:95:18:07:8c:aa:76:4c:60:35:88:ad:60:56:71"
            - checkout
            - run: sudo pip install --progress-bar off -r docs/requirements.txt
            - run: sudo pip install --progress-bar off -r requirements.txt
            - run: cd docs/source && ln -s ../../examples/README.md examples.md && cd -
            - run: cd docs && make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir
            - run: sudo pip install .[tf,torch,docs]
            - run: ./.circleci/deploy.sh
    check_code_quality:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.6
        resource_class: medium
        parallelism: 1
        steps:
            - checkout
            # we need a version of isort with https://github.com/timothycrosley/isort/pull/1000
            - run: sudo pip install git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
            - run: sudo pip install .[tf,torch,quality]
            - run: black --check --line-length 119 --target-version py35 examples templates tests src utils
            - run: isort --check-only --recursive examples templates tests src utils
            - run: flake8 examples templates tests src utils
    check_repository_consistency:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.6
        resource_class: small
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install requests
            - run: python ./utils/link_tester.py
workflow_filters: &workflow_filters
    filters:
        branches:
@@ -92,9 +128,12 @@ workflows:
    version: 2
    build_and_test:
        jobs:
            - build_py3_torch_and_tf
            - build_py3_torch
            - build_py3_tf
            - build_py2_torch
            - build_py2_tf
            - deploy_doc: *workflow_filters
            - check_code_quality
            - check_repository_consistency
            - run_examples_torch
            - run_tests_custom_tokenizers
            - run_tests_torch_and_tf
            - run_tests_torch
            - run_tests_tf
            - build_doc
            - deploy_doc: *workflow_filters
49  .circleci/deploy.sh  Executable file
@@ -0,0 +1,49 @@
cd docs
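# Note (assumption, not stated in this script): $doc and $dir are expected to be set in the
# CI environment. $doc is the SSH host/alias of the documentation server and $dir is the
# remote directory that the built HTML pages are copied into.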

function deploy_doc(){
    echo "Creating doc at commit $1 and pushing to folder $2"
    git checkout $1
    if [ ! -z "$2" ]
    then
        if [ "$2" == "master" ]; then
            echo "Pushing master"
            make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir/$2/
            cp -r _build/html/_static .
        elif ssh -oStrictHostKeyChecking=no $doc "[ -d $dir/$2 ]"; then
            echo "Directory" $2 "already exists"
            scp -r -oStrictHostKeyChecking=no _static/* $doc:$dir/$2/_static/
        else
            echo "Pushing version" $2
            make clean && make html
            rm -rf _build/html/_static
            cp -r _static _build/html
            scp -r -oStrictHostKeyChecking=no _build/html $doc:$dir/$2
        fi
    else
        echo "Pushing stable"
        make clean && make html
        rm -rf _build/html/_static
        cp -r _static _build/html
        scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir
    fi
}

# You can find the commit for each tag on https://github.com/huggingface/transformers/tags
deploy_doc "master" master
deploy_doc "b33a385" v1.0.0
deploy_doc "fe02e45" v1.1.0
deploy_doc "89fd345" v1.2.0
deploy_doc "fc9faa8" v2.0.0
deploy_doc "3ddce1d" v2.1.1
deploy_doc "3616209" v2.2.0
deploy_doc "d0f8b9a" v2.3.0
deploy_doc "6664ea9" v2.4.0
deploy_doc "fb560dc" v2.5.0
deploy_doc "b90745c" v2.5.1
deploy_doc "fbc5bf1" v2.6.0
deploy_doc "6f5a12a" v2.7.0
deploy_doc "11c3257" v2.8.0
deploy_doc "e7cfc1a" v2.9.0
deploy_doc "7cb203f" v2.9.1
deploy_doc "10d7239" v2.10.0
deploy_doc "b42586e" #v2.11.0 Latest stable release
22  .github/ISSUE_TEMPLATE/---new-benchmark.md  vendored  Normal file
@@ -0,0 +1,22 @@
---
name: "\U0001F5A5 New benchmark"
about: Benchmark a part of this library and share your results
title: "[Benchmark]"
labels: ''
assignees: ''

---

# 🖥 Benchmarking `transformers`

## Benchmark

Which part of `transformers` did you benchmark?

## Set-up

What did you run your benchmarks on? Please include details, such as: CPU, GPU? If using multiple GPUs, which parallelization did you use?

## Results

Put your results here!
20  .github/ISSUE_TEMPLATE/--new-model-addition.md  vendored  Normal file
@@ -0,0 +1,20 @@
---
name: "\U0001F31F New model addition"
about: Submit a proposal/request to implement a new Transformer-based model
title: ''
labels: New model
assignees: ''

---

# 🌟 New model addition

## Model description

<!-- Important information -->

## Open source status

* [ ] the model implementation is available: (give details)
* [ ] the model weights are available: (give details)
* [ ] who are the authors: (mention them, if possible by @gh-username)
54  .github/ISSUE_TEMPLATE/bug-report.md  vendored
@@ -1,25 +1,29 @@
---
name: "\U0001F41B Bug Report"
about: Submit a bug report to help us improve PyTorch Transformers
about: Submit a bug report to help us improve transformers
title: ''
labels: ''
assignees: ''

---

## 🐛 Bug
# 🐛 Bug

<!-- Important information -->
## Information

Model I am using (Bert, XLNet....):
Model I am using (Bert, XLNet ...):

Language I am using the model on (English, Chinese....):
Language I am using the model on (English, Chinese ...):

The problem arise when using:
* [ ] the official example scripts: (give details)
* [ ] my own modified scripts: (give details)
The problem arises when using:
* [ ] the official example scripts: (give details below)
* [ ] my own modified scripts: (give details below)

The task I am working on is:
* [ ] an official GLUE/SQuAD task: (give the name)
* [ ] my own task or dataset: (give details)
* [ ] my own task or dataset: (give details below)

## To Reproduce
## To reproduce

Steps to reproduce the behavior:

@@ -27,22 +31,22 @@ Steps to reproduce the behavior:
2.
3.

<!-- If you have a code sample, error messages, stack traces, please provide it here as well. -->
<!-- If you have code snippets, error messages, stack traces please provide them here as well.
Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.-->

## Expected behavior

<!-- A clear and concise description of what you expected to happen. -->
<!-- A clear and concise description of what you would expect to happen. -->

## Environment

* OS:
* Python version:
* PyTorch version:
* PyTorch Transformers version (or branch):
* Using GPU ?
* Distributed or parallel setup?
* Any other relevant information:

## Additional context

<!-- Add any other context about the problem here. -->
## Environment info
<!-- You can run the command `transformers-cli env` and copy-and-paste its output below.
Don't forget to fill out the missing fields in that output! -->

- `transformers` version:
- Platform:
- Python version:
- PyTorch version (GPU?):
- Tensorflow version (GPU?):
- Using GPU in script?:
- Using distributed or parallel set-up in script?:
23  .github/ISSUE_TEMPLATE/feature-request.md  vendored
@@ -1,16 +1,25 @@
---
name: "\U0001F680 Feature Request"
about: Submit a proposal/request for a new PyTorch Transformers feature
name: "\U0001F680 Feature request"
about: Submit a proposal/request for a new transformers feature
title: ''
labels: ''
assignees: ''

---

## 🚀 Feature
# 🚀 Feature request

<!-- A clear and concise description of the feature proposal. Please provide a link to the paper and code in case they exist. -->
<!-- A clear and concise description of the feature proposal.
Please provide a link to the paper and code in case they exist. -->

## Motivation

<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too. -->
<!-- Please outline the motivation for the proposal. Is your feature request
related to a problem? e.g., I'm always frustrated when [...]. If this is related
to another GitHub issue, please link here too. -->

## Additional context
## Your contribution

<!-- Add any other context or screenshots about the feature request here. -->
<!-- Is there any way that you could help, e.g. by submitting a PR?
Make sure to read the CONTRIBUTING.MD readme:
https://github.com/huggingface/transformers/blob/master/CONTRIBUTING.md -->
61  .github/ISSUE_TEMPLATE/migration.md  vendored
@@ -1,43 +1,58 @@
---
name: "\U0001F4DA Migration from PyTorch-pretrained-Bert"
about: Report a problem when migrating from PyTorch-pretrained-Bert to Transformers
name: "\U0001F4DA Migration from pytorch-pretrained-bert or pytorch-transformers"
about: Report a problem when migrating from pytorch-pretrained-bert or pytorch-transformers
to transformers
title: ''
labels: Migration
assignees: ''

---

## 📚 Migration
# 📚 Migration

## Information

<!-- Important information -->

Model I am using (Bert, XLNet....):
Model I am using (Bert, XLNet ...):

Language I am using the model on (English, Chinese....):
Language I am using the model on (English, Chinese ...):

The problem arise when using:
* [ ] the official example scripts: (give details)
* [ ] my own modified scripts: (give details)
The problem arises when using:
* [ ] the official example scripts: (give details below)
* [ ] my own modified scripts: (give details below)

The task I am working on is:
* [ ] an official GLUE/SQuAD task: (give the name)
* [ ] my own task or dataset: (give details)
* [ ] my own task or dataset: (give details below)

Details of the issue:
## Details

<!-- A clear and concise description of the migration issue. If you have code snippets, please provide it here as well. -->
<!-- A clear and concise description of the migration issue.
If you have code snippets, please provide it here as well.
Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.
-->

## Environment
## Environment info
<!-- You can run the command `python transformers-cli env` and copy-and-paste its output below.
Don't forget to fill out the missing fields in that output! -->

- `transformers` version:
- Platform:
- Python version:
- PyTorch version (GPU?):
- Tensorflow version (GPU?):
- Using GPU in script?:
- Using distributed or parallel set-up in script?:

<!-- IMPORTANT: which version of the former library do you use? -->
* `pytorch-transformers` or `pytorch-pretrained-bert` version (or branch):

* OS:
* Python version:
* PyTorch version:
* PyTorch Transformers version (or branch):
* Using GPU ?
* Distributed or parallel setup?
* Any other relevant information:

## Checklist

- [ ] I have read the migration guide in the readme.
  ([pytorch-transformers](https://github.com/huggingface/transformers#migrating-from-pytorch-transformers-to-transformers);
  [pytorch-pretrained-bert](https://github.com/huggingface/transformers#migrating-from-pytorch-pretrained-bert-to-transformers))
- [ ] I checked if a related official extension example runs on my machine.

## Additional context

<!-- Add any other context about the problem here. -->
29  .github/ISSUE_TEMPLATE/question-help.md  vendored
@@ -1,8 +1,29 @@
---
name: "❓Questions & Help"
about: Start a general discussion related to PyTorch Transformers
name: "❓ Questions & Help"
about: Post your general questions on Stack Overflow tagged huggingface-transformers
title: ''
labels: ''
assignees: ''

---

## ❓ Questions & Help
# ❓ Questions & Help

<!-- A clear and concise description of the question. -->
<!-- The GitHub issue tracker is primarily intended for bugs, feature requests,
new models and benchmarks, and migration questions. For all other questions,
we direct you to Stack Overflow (SO) where a whole community of PyTorch and
Tensorflow enthusiasts can help you out. Make sure to tag your question with the
right deep learning framework as well as the huggingface-transformers tag:
https://stackoverflow.com/questions/tagged/huggingface-transformers

If your question wasn't answered after a period of time on Stack Overflow, you
can always open a question on GitHub. You should then link to the SO question
that you posted.
-->

## Details
<!-- Description of your issue -->

<!-- You should first ask your question on SO, and only if
you didn't get an answer ask it here on GitHub. -->
**A link to the original question on Stack Overflow**:
19  .github/workflows/github-push.yml  vendored  Normal file
@@ -0,0 +1,19 @@
name: GitHub-hosted runner

on: push

jobs:
  check_code_quality:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.7
      # - name: Install dependencies
      #   run: |
      #     pip install .[tf,torch,quality]
32  .github/workflows/github-torch-hub.yml  vendored  Normal file
@@ -0,0 +1,32 @@
name: Torch hub integration

on:
  push:
    branches:
      - "*"

jobs:
  torch_hub_integration:
    runs-on: ubuntu-latest
    steps:
      # no checkout necessary here.
      - name: Extract branch name
        run: echo "::set-env name=BRANCH::${GITHUB_REF#refs/heads/}"
      - name: Check branch name
        run: echo $BRANCH
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.7
      - name: Install dependencies
        run: |
          pip install torch
          pip install numpy tokenizers filelock requests tqdm regex sentencepiece sacremoses packaging

      - name: Torch hub list
        run: |
          python -c "import torch; print(torch.hub.list('huggingface/transformers:$BRANCH'))"

      - name: Torch hub help
        run: |
          python -c "import torch; print(torch.hub.help('huggingface/transformers:$BRANCH', 'modelForSequenceClassification'))"
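The same two checks can be reproduced locally; a minimal sketch, assuming `torch` and the dependencies from the install step above are available, and substituting a concrete branch name such as `master` for `$BRANCH`:

```bash
python -c "import torch; print(torch.hub.list('huggingface/transformers:master'))"
python -c "import torch; print(torch.hub.help('huggingface/transformers:master', 'modelForSequenceClassification'))"
```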
54  .github/workflows/self-push.yml  vendored  Normal file
@@ -0,0 +1,54 @@
name: Self-hosted runner (push)

on:
  push:
    branches:
      - master
    paths:
      - "src/**"
      - "tests/**"
      - ".github/**"
  # pull_request:
  repository_dispatch:


jobs:
  run_tests_torch_and_tf_gpu:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v2
      - name: Python version
        run: |
          which python
          python --version
          pip --version
      - name: Current dir
        run: pwd
      - run: nvidia-smi
      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
        run: |
          python -m venv .env
          source .env/bin/activate
          which python
          python --version
          pip --version
      - name: Install dependencies
        run: |
          source .env/bin/activate
          pip install torch
          pip install .[sklearn,testing]

      - name: Are GPUs recognized by our DL frameworks
        run: |
          source .env/bin/activate
          python -c "import torch; print(torch.cuda.is_available())"

      - name: Run all non-slow tests on GPU
        env:
          TF_FORCE_GPU_ALLOW_GROWTH: "true"
          # TF_GPU_MEMORY_LIMIT: 4096
          OMP_NUM_THREADS: 1
          USE_CUDA: yes
        run: |
          source .env/bin/activate
          python -m pytest -n 2 --dist=loadfile -s -v ./tests/
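Once such an environment is set up, the final step amounts to the following single command (a sketch that simply inlines the `env` block above):

```bash
USE_CUDA=yes OMP_NUM_THREADS=1 TF_FORCE_GPU_ALLOW_GROWTH=true python -m pytest -n 2 --dist=loadfile -s -v ./tests/
```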
50  .github/workflows/self-scheduled.yml  vendored  Normal file
@@ -0,0 +1,50 @@
name: Self-hosted runner (scheduled)

on:
  push:
    branches:
      - ci_*
  repository_dispatch:
  schedule:
    - cron: "0 0 * * *"

jobs:
  run_all_tests_torch_and_tf_gpu:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v2
      - name: Python version
        run: |
          which python
          python --version
          pip --version
      - name: Current dir
        run: pwd
      - run: nvidia-smi
      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
        run: |
          python -m venv .env
          source .env/bin/activate
          which python
          python --version
          pip --version
      - name: Install dependencies
        run: |
          source .env/bin/activate
          pip install .[sklearn,torch,testing]

      - name: Are GPUs recognized by our DL frameworks
        run: |
          source .env/bin/activate
          python -c "import torch; print(torch.cuda.is_available())"

      - name: Run all tests on GPU
        env:
          TF_FORCE_GPU_ALLOW_GROWTH: "true"
          OMP_NUM_THREADS: 1
          RUN_SLOW: yes
          USE_CUDA: yes
        run: |
          source .env/bin/activate
          python -m pytest -n 1 --dist=loadfile -s -v ./tests/
25  .gitignore  vendored
@@ -8,6 +8,10 @@ __pycache__/
# C extensions
*.so

# tests and logs
tests/fixtures
logs/

# Distribution / packaging
.Python
build/
@@ -116,8 +120,12 @@ dmypy.json
.pyre/

# vscode
.vs
.vscode

# Pycharm
.idea

# TF code
tensorflow_code

@@ -127,8 +135,21 @@ proc_data

# examples
runs
examples/runs
/runs_old
/wandb
/examples/runs
/examples/**/*.args

# data
/data
serialization_dir
serialization_dir

# emacs
*.*~
debug.env

# vim
.*.swp

#ctags
tags
277  CONTRIBUTING.md  Normal file
@@ -0,0 +1,277 @@
# How to contribute to transformers?

Everyone is welcome to contribute, and we value everybody's contribution. Code
is thus not the only way to help the community. Answering questions, helping
others, reaching out and improving the documentation are immensely valuable to
the community.

It also helps us if you spread the word: reference the library from blog posts
on the awesome projects it made possible, shout out on Twitter every time it has
helped you, or simply star the repo to say "thank you".

## You can contribute in so many ways!

There are 4 ways you can contribute to transformers:
* Fixing outstanding issues with the existing code;
* Implementing new models;
* Contributing to the examples or to the documentation;
* Submitting issues related to bugs or desired new features.

*All are equally valuable to the community.*

## Submitting a new issue or feature request

Do your best to follow these guidelines when submitting an issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.

### Did you find a bug?

The transformers library is robust and reliable thanks to the users who notify us of
the problems they encounter. So thank you for reporting an issue.

First, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on Github under Issues).

Did not find it? :( So we can act quickly on it, please follow these steps:

* Include your **OS type and version**, the versions of **Python**, **PyTorch** and
  **Tensorflow** when applicable;
* A short, self-contained, code snippet that allows us to reproduce the bug in
  less than 30s;
* Provide the *full* traceback if an exception is raised.

To get the OS and software versions automatically, you can run the following command:

```bash
transformers-cli env
```

or from the root of the repository the following command:

```bash
python src/transformers/commands/transformers_cli.py env
```


### Do you want to implement a new model?

Awesome! Please provide the following information:

* Short description of the model and link to the paper;
* Link to the implementation if it is open-source;
* Link to the model weights if they are available.

If you are willing to contribute the model yourself, let us know so we can best
guide you.

We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them
in the [`templates`](https://github.com/huggingface/transformers/templates) folder.

### Do you want a new feature (that is not a model)?

A world-class feature request addresses the following points:

1. Motivation first:
  * Is it related to a problem/frustration with the library? If so, please explain
    why. Providing a code snippet that demonstrates the problem is best.
  * Is it related to something you would need for a project? We'd love to hear
    about it!
  * Is it something you worked on and think could benefit the community?
    Awesome! Tell us what problem it solved for you.
2. Write a *full paragraph* describing the feature;
3. Provide a **code snippet** that demonstrates its future use;
4. In case this is related to a paper, please attach a link;
5. Attach any additional information (drawings, screenshots, etc.) you think may help.

If your issue is well written we're already 80% of the way there by the time you
post it.

We have added **templates** to guide you in the process of adding a new example script for training or testing the
models in the library. You can find them in the [`templates`](https://github.com/huggingface/transformers/templates)
folder.
## Start contributing! (Pull Requests)

Before writing code, we strongly advise you to search through the existing PRs or
issues to make sure that nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to be able to contribute to
`transformers`. `git` is not the easiest tool to use but it has the greatest
manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.

Follow these steps to start contributing:

1. Fork the [repository](https://github.com/huggingface/transformers) by
   clicking on the 'Fork' button on the repository's page. This creates a copy of the code
   under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

   ```bash
   $ git clone git@github.com:<your Github handle>/transformers.git
   $ cd transformers
   $ git remote add upstream https://github.com/huggingface/transformers.git
   ```

3. Create a new branch to hold your development changes:

   ```bash
   $ git checkout -b a-descriptive-name-for-my-changes
   ```

   **do not** work on the `master` branch.

4. Set up a development environment by running the following command in a virtual environment:

   ```bash
   $ pip install -e ".[dev]"
   ```

   (If transformers was already installed in the virtual environment, remove
   it with `pip uninstall transformers` before reinstalling it in editable
   mode with the `-e` flag.)

   Right now, we need an unreleased version of `isort` to avoid a
   [bug](https://github.com/timothycrosley/isort/pull/1000):

   ```bash
   $ pip install -U git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
   ```
5. Develop the features on your branch.

   As you work on the features, you should make sure that the test suite
   passes:

   ```bash
   $ make test
   ```

   `transformers` relies on `black` and `isort` to format its source code
   consistently. After you make changes, format them with:

   ```bash
   $ make style
   ```

   `transformers` also uses `flake8` to check for coding mistakes. Quality
   control runs in CI, however you can also run the same checks with:

   ```bash
   $ make quality
   ```

   Once you're happy with your changes, add changed files using `git add` and
   make a commit with `git commit` to record your changes locally:

   ```bash
   $ git add modified_file.py
   $ git commit
   ```

   Please write [good commit
   messages](https://chris.beams.io/posts/git-commit/).

   It is a good idea to sync your copy of the code with the original
   repository regularly. This way you can quickly account for changes:

   ```bash
   $ git fetch upstream
   $ git rebase upstream/master
   ```

   Push the changes to your account using:

   ```bash
   $ git push -u origin a-descriptive-name-for-my-changes
   ```

6. Once you are satisfied (**and the checklist below is happy too**), go to the
   webpage of your fork on GitHub. Click on 'Pull request' to send your changes
   to the project maintainers for review.

7. It's ok if maintainers ask you for changes. It happens to core contributors
   too! So everyone can see the changes in the Pull request, work in your local
   branch and push the changes to your fork. They will automatically appear in
   the pull request.


### Checklist

1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in
   the pull request description to make sure they are linked (and people
   consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`. These
   are useful to avoid duplicated work, and to differentiate it from PRs ready
   to be merged;
4. Make sure existing tests pass;
5. Add high-coverage tests. No quality testing = no merge.
   - If you are adding a new model, make sure that you use
     `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)`, which triggers the common tests.
   - If you are adding new `@slow` tests, make sure they pass using
     `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`.
   - If you are adding a new tokenizer, write tests, and make sure
     `RUN_SLOW=1 python -m pytest tests/test_tokenization_{your_model_name}.py` passes.
   CircleCI does not run the slow tests.
6. All public methods must have informative docstrings that work nicely with sphinx. See `modeling_ctrl.py` for an
   example.

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
the [tests folder](https://github.com/huggingface/transformers/tree/master/tests) and examples tests in the
[examples folder](https://github.com/huggingface/transformers/tree/master/examples).

We like `pytest` and `pytest-xdist` because it's faster. From the root of the
repository, here's how to run tests with `pytest` for the library:

```bash
$ python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

and for the examples:

```bash
$ pip install -r examples/requirements.txt # only needed the first time
$ python -m pytest -n auto --dist=loadfile -s -v ./examples/
```

In fact, that's how `make test` and `make test-examples` are implemented!

You can specify a smaller set of tests in order to test only the feature
you're working on.
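For example (the test file and keyword below are just illustrations; pick whichever ones cover your change):

```bash
$ python -m pytest -sv ./tests/test_tokenization_bert.py   # a single test file
$ python -m pytest -sv ./tests/ -k "tokenization"          # tests whose name matches a keyword
```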

By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to
`yes` to run them. This will download many gigabytes of models; make sure you
have enough disk space and a good Internet connection, or a lot of patience!

```bash
$ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/
$ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/
```

Likewise, set the `RUN_CUSTOM_TOKENIZERS` environment variable to `yes` to run
tests for custom tokenizers, which don't run by default either.
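For instance, the Japanese tokenizer tests that CI runs can be executed locally like this (assuming the extra tokenizer dependencies, e.g. the `mecab` extra, are installed):

```bash
$ RUN_CUSTOM_TOKENIZERS=yes python -m pytest -sv ./tests/test_tokenization_bert_japanese.py
```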

🤗 Transformers uses `pytest` as a test runner only. It doesn't use any
`pytest`-specific features in the test suite itself.

This means `unittest` is fully supported. Here's how to run tests with
`unittest`:

```bash
$ python -m unittest discover -s tests -t . -v
$ python -m unittest discover -s examples -t examples -v
```


### Style guide

For documentation strings, `transformers` follows the [google style](https://google.github.io/styleguide/pyguide.html).
Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/master/docs#writing-documentation---specification)
for more information.
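For illustration, a Google-style docstring on a made-up helper (not a function from the library) looks like this:

```python
def count_tokens(text, lowercase=True):
    """Count whitespace-separated tokens in a string.

    Args:
        text (str): The text to split into tokens.
        lowercase (bool, optional): Whether to lowercase ``text`` before splitting. Defaults to ``True``.

    Returns:
        int: The number of whitespace-separated tokens in ``text``.
    """
    if lowercase:
        text = text.lower()
    return len(text.split())
```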

#### This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/master/CONTRIBUTING.md)
24  Makefile  Normal file
@@ -0,0 +1,24 @@
.PHONY: quality style test test-examples

# Check that source code meets quality standards

quality:
	black --check --line-length 119 --target-version py35 examples templates tests src utils
	isort --check-only --recursive examples templates tests src utils
	flake8 examples templates tests src utils

# Format source code automatically

style:
	black --line-length 119 --target-version py35 examples templates tests src utils
	isort --recursive examples templates tests src utils

# Run tests for the library

test:
	python -m pytest -n auto --dist=loadfile -s -v ./tests/

# Run tests for examples

test-examples:
	python -m pytest -n auto --dist=loadfile -s -v ./examples/
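Typical local usage of these targets, run from the repository root once the development dependencies described in CONTRIBUTING.md are installed:

```bash
make style          # auto-format the code with black and isort
make quality        # check formatting and run flake8
make test           # run the library tests
make test-examples  # run the example tests
```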
366  README.md
@@ -4,7 +4,7 @@
<br>
<p>
<p align="center">
    <a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
    <a href="https://circleci.com/gh/huggingface/transformers">
        <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
    </a>
    <a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
@@ -19,15 +19,15 @@
</p>

<h3 align="center">
<p>State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch
<p>State-of-the-art Natural Language Processing for PyTorch and TensorFlow 2.0
</h3>

🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides state-of-the-art general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between TensorFlow 2.0 and PyTorch.
🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides state-of-the-art general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet, T5, CTRL...) for Natural Language Understanding (NLU) and Natural Language Generation (NLG) with over thousands of pretrained models in 100+ languages and deep interoperability between PyTorch & TensorFlow 2.0.

### Recent contributors
[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/0)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/1)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/2)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/3)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/4)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/5)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/6)[](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/7)

### Features

- As easy to use as pytorch-transformers
- As powerful and concise as Keras
- High performance on NLU and NLG tasks
- Low barrier to entry for educators and practitioners

@@ -39,7 +39,7 @@ State-of-the-art NLP for everyone
Lower compute costs, smaller carbon footprint
- Researchers can share trained models instead of always retraining
- Practitioners can reduce compute time and production costs
- 8 architectures with over 30 pretrained models, some in more than 100 languages
- Dozens of architectures with over 1,000 pretrained models, some in more than 100 languages

Choose the right framework for every part of a model's lifetime
- Train state-of-the-art models in 3 lines of code
@@ -54,19 +54,30 @@ Choose the right framework for every part of a model's lifetime
| [Model architectures](#model-architectures) | Architectures (with pretrained weights) |
| [Online demo](#online-demo) | Experimenting with this repo’s text generation capabilities |
| [Quick tour: Usage](#quick-tour) | Tokenizers & models usage: Bert and GPT-2 |
| [Quick tour: TF 2.0 and PyTorch ](#Quick-tour-TF-2.0-training-and-PyTorch-interoperability) | Train a TF 2.0 model in 10 lines of code, load it in PyTorch |
| [Quick tour: TF 2.0 and PyTorch ](#Quick-tour-TF-20-training-and-PyTorch-interoperability) | Train a TF 2.0 model in 10 lines of code, load it in PyTorch |
| [Quick tour: pipelines](#quick-tour-of-pipelines) | Using Pipelines: Wrapper around tokenizer and models to use finetuned models |
| [Quick tour: Fine-tuning/usage scripts](#quick-tour-of-the-fine-tuningusage-scripts) | Using provided scripts: GLUE, SQuAD and Text generation |
| [Migrating from pytorch-transformers to transformers](#Migrating-from-pytorch-transformers-to-transformers) | Migrating your code from pytorch-pretrained-bert to transformers |
| [Quick tour: Share your models ](#Quick-tour-of-model-sharing) | Upload and share your fine-tuned models with the community |
| [Migrating from pytorch-transformers to transformers](#Migrating-from-pytorch-transformers-to-transformers) | Migrating your code from pytorch-transformers to transformers |
| [Migrating from pytorch-pretrained-bert to pytorch-transformers](#Migrating-from-pytorch-pretrained-bert-to-transformers) | Migrating your code from pytorch-pretrained-bert to transformers |
| [Documentation](https://huggingface.co/transformers/) | Full API documentation and more |

## Installation

This repo is tested on Python 2.7 and 3.5+ (examples are tested only on python 3.5+) and PyTorch 1.0.0+
This repo is tested on Python 3.6+, PyTorch 1.0.0+ (PyTorch 1.3.1+ for examples) and TensorFlow 2.0.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

Create a virtual environment with the version of Python you're going to use and activate it.

Now, if you want to use 🤗 Transformers, you can install it with pip. If you'd like to play with the examples, you must install it from source.

### With pip

Transformers can be installed by pip as follows:
First you need to install one of, or both, TensorFlow 2.0 and PyTorch.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available) and/or [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform.

When TensorFlow 2.0 and/or PyTorch has been installed, 🤗 Transformers can be installed using pip as follows:

```bash
pip install transformers
@@ -74,49 +85,92 @@ pip install transformers

### From source

Clone the repository and run:
Here also, you first need to install one of, or both, TensorFlow 2.0 and PyTorch.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available) and/or [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform.

When TensorFlow 2.0 and/or PyTorch has been installed, you can install from source by cloning the repository and running:

```bash
pip install [--editable] .
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```

When you update the repository, you should upgrade the transformers installation and its dependencies as follows:

```bash
git pull
pip install --upgrade .
```

### Run the examples

Examples are included in the repository but are not shipped with the library.

Therefore, in order to run the latest versions of the examples, you need to install from source, as described above.

Look at the [README](https://github.com/huggingface/transformers/blob/master/examples/README.md) for how to run examples.

### Tests

A series of tests is included for the library and the example scripts. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/transformers/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples).
A series of tests are included for the library and for some example scripts. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples).

These tests can be run using `pytest` (install pytest if needed with `pip install pytest`).
Depending on which framework is installed (TensorFlow 2.0 and/or PyTorch), the irrelevant tests will be skipped. Ensure that both frameworks are installed if you want to execute all tests.

You can run the tests from the root of the cloned repository with the commands:
Here's the easiest way to run tests for the library:

```bash
python -m pytest -sv ./transformers/tests/
python -m pytest -sv ./examples/
pip install -e ".[testing]"
make test
```

and for the examples:

```bash
pip install -e ".[testing]"
pip install -r examples/requirements.txt
make test-examples
```

For details, refer to the [contributing guide](https://github.com/huggingface/transformers/blob/master/CONTRIBUTING.md#tests).

### Do you want to run a Transformer model on a mobile device?

You should check out our [`swift-coreml-transformers`](https://github.com/huggingface/swift-coreml-transformers) repo.

It contains an example of a conversion script from a Pytorch trained Transformer model (here, `GPT-2`) to a CoreML model that runs on iOS devices.
It contains a set of tools to convert PyTorch or TensorFlow 2.0 trained Transformer models (currently contains `GPT-2`, `DistilGPT-2`, `BERT`, and `DistilBERT`) to CoreML models that run on iOS devices.

At some point in the future, you'll be able to seamlessly move from pre-training or fine-tuning models in PyTorch to productizing them in CoreML,
or prototype a model or an app in CoreML then research its hyperparameters or architecture from PyTorch. Super exciting!
At some point in the future, you'll be able to seamlessly move from pre-training or fine-tuning models to productizing them in CoreML, or prototype a model or an app in CoreML then research its hyperparameters or architecture from TensorFlow 2.0 and/or PyTorch. Super exciting!

## Model architectures

🤗 Transformers currently provides 8 NLU/NLG architectures:
🤗 Transformers currently provides the following NLU/NLG architectures:

1. **[BERT](https://github.com/google-research/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
2. **[GPT](https://github.com/openai/finetune-transformer-lm)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
3. **[GPT-2](https://blog.openai.com/better-language-models/)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
4. **[Transformer-XL](https://github.com/kimiyoung/transformer-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
5. **[XLNet](https://github.com/zihangdai/xlnet/)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
6. **[XLM](https://github.com/facebookresearch/XLM/)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
7. **[RoBERTa](https://github.com/pytorch/fairseq/tree/master/examples/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
8. **[DistilBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation)** (from HuggingFace), released together with the blogpost [Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT](https://medium.com/huggingface/distilbert-8cf3380435b5
) by Victor Sanh, Lysandre Debut and Thomas Wolf.
1. **[BERT](https://huggingface.co/transformers/model_doc/bert.html)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
2. **[GPT](https://huggingface.co/transformers/model_doc/gpt.html)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
3. **[GPT-2](https://huggingface.co/transformers/model_doc/gpt2.html)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
4. **[Transformer-XL](https://huggingface.co/transformers/model_doc/transformerxl.html)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
5. **[XLNet](https://huggingface.co/transformers/model_doc/xlnet.html)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
6. **[XLM](https://huggingface.co/transformers/model_doc/xlm.html)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
7. **[RoBERTa](https://huggingface.co/transformers/model_doc/roberta.html)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
8. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.
9. **[CTRL](https://huggingface.co/transformers/model_doc/ctrl.html)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
10. **[CamemBERT](https://huggingface.co/transformers/model_doc/camembert.html)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
11. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
12. **[T5](https://huggingface.co/transformers/model_doc/t5.html)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
13. **[XLM-RoBERTa](https://huggingface.co/transformers/model_doc/xlmroberta.html)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
14. **[MMBT](https://github.com/facebookresearch/mmbt/)** (from Facebook), released together with the paper a [Supervised Multimodal Bitransformers for Classifying Images and Text](https://arxiv.org/pdf/1909.02950.pdf) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
15. **[FlauBERT](https://huggingface.co/transformers/model_doc/flaubert.html)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
16. **[BART](https://huggingface.co/transformers/model_doc/bart.html)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
17. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
18. **[DialoGPT](https://huggingface.co/transformers/model_doc/dialogpt.html)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
19. **[Reformer](https://huggingface.co/transformers/model_doc/reformer.html)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
20. **[MarianMT](https://huggingface.co/transformers/model_doc/marian.html)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
21. **[Longformer](https://huggingface.co/transformers/model_doc/longformer.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
22. **[Other community models](https://huggingface.co/models)**, contributed by the [community](https://huggingface.co/users).
23. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedbacks before starting your PR.

These implementations have been tested on several datasets (see the example scripts) and should match the performances of the original implementations (e.g. ~93 F1 on SQuAD for BERT Whole-Word-Masking, ~88 F1 on RocStories for OpenAI GPT, ~18.3 perplexity on WikiText 103 for Transformer-XL, ~0.916 Peason R coefficient on STS-B for XLNet). You can find more details on the performances in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).
These implementations have been tested on several datasets (see the example scripts) and should match the performances of the original implementations (e.g. ~93 F1 on SQuAD for BERT Whole-Word-Masking, ~88 F1 on RocStories for OpenAI GPT, ~18.3 perplexity on WikiText 103 for Transformer-XL, ~0.916 Pearson R coefficient on STS-B for XLNet). You can find more details on the performances in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).

## Online demo

@ -136,16 +190,19 @@ import torch
|
||||
from transformers import *
|
||||
|
||||
# Transformers has a unified API
|
||||
# for 8 transformer architectures and 30 pretrained weights.
|
||||
# for 10 transformer architectures and 30 pretrained weights.
|
||||
# Model | Tokenizer | Pretrained weights shortcut
|
||||
MODELS = [(BertModel, BertTokenizer, 'bert-base-uncased'),
|
||||
(OpenAIGPTModel, OpenAIGPTTokenizer, 'openai-gpt'),
|
||||
(GPT2Model, GPT2Tokenizer, 'gpt2'),
|
||||
(CTRLModel, CTRLTokenizer, 'ctrl'),
|
||||
(TransfoXLModel, TransfoXLTokenizer, 'transfo-xl-wt103'),
|
||||
(XLNetModel, XLNetTokenizer, 'xlnet-base-cased'),
|
||||
(XLMModel, XLMTokenizer, 'xlm-mlm-enfr-1024'),
|
||||
(DistilBertModel, DistilBertTokenizer, 'distilbert-base-uncased'),
|
||||
(RobertaModel, RobertaTokenizer, 'roberta-base')]
|
||||
(DistilBertModel, DistilBertTokenizer, 'distilbert-base-cased'),
|
||||
(RobertaModel, RobertaTokenizer, 'roberta-base'),
|
||||
(XLMRobertaModel, XLMRobertaTokenizer, 'xlm-roberta-base'),
|
||||
]
|
||||
|
||||
# To use TensorFlow 2.0 versions of the models, simply prefix the class names with 'TF', e.g. `TFRobertaModel` is the TF 2.0 counterpart of the PyTorch model `RobertaModel`
|
||||
|
||||
@@ -162,35 +219,35 @@ for model_class, tokenizer_class, pretrained_weights in MODELS:

# Each architecture is provided with several classes for fine-tuning on down-stream tasks, e.g.
BERT_MODEL_CLASSES = [BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction,
                      BertForSequenceClassification, BertForTokenClassification, BertForQuestionAnswering]

# All the classes for an architecture can be instantiated from pretrained weights for this architecture
# Note that additional weights added for fine-tuning are only initialized
# and need to be trained on the down-stream task
pretrained_weights = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
for model_class in BERT_MODEL_CLASSES:
    # Load pretrained model/tokenizer
    model = model_class.from_pretrained(pretrained_weights)

    # Models can return full list of hidden-states & attentions weights at each layer
    model = model_class.from_pretrained(pretrained_weights,
                                        output_hidden_states=True,
                                        output_attentions=True)
    input_ids = torch.tensor([tokenizer.encode("Let's see all hidden-states and attentions on this text")])
    all_hidden_states, all_attentions = model(input_ids)[-2:]

    # Models are compatible with Torchscript
    model = model_class.from_pretrained(pretrained_weights, torchscript=True)
    traced_model = torch.jit.trace(model, (input_ids,))

    # Simple serialization for models and tokenizers
    model.save_pretrained('./directory/to/save/')  # save
    model = model_class.from_pretrained('./directory/to/save/')  # re-load
    tokenizer.save_pretrained('./directory/to/save/')  # save
    tokenizer = BertTokenizer.from_pretrained('./directory/to/save/')  # re-load

# SOTA examples for GLUE, SQUAD, text generation...
```
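
The `TF`-prefixed classes follow the same API. As a quick illustration (a minimal sketch, assuming TensorFlow 2.0 is installed and TF weights exist for `bert-base-uncased`):

```python
import tensorflow as tf
from transformers import BertTokenizer, TFBertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tf_model = TFBertModel.from_pretrained('bert-base-uncased')

# Encode a sentence and run a forward pass, exactly like the PyTorch version above
input_ids = tf.constant([tokenizer.encode("Hello, TF 2.0 world!", add_special_tokens=True)])
last_hidden_states = tf_model(input_ids)[0]  # shape (batch_size, sequence_length, hidden_size)
```
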
## Quick tour TF 2.0 training and PyTorch interoperability

@@ -200,7 +257,7 @@ Let's do a quick example of how a TensorFlow 2.0 model can be trained in 12 line
```python
import tensorflow as tf
import tensorflow_datasets
from transformers import *

# Load dataset, tokenizer, model from pretrained model/vocabulary
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
@@ -208,12 +265,12 @@ model = TFBertForSequenceClassification.from_pretrained('bert-base-cased')
data = tensorflow_datasets.load('glue/mrpc')

# Prepare dataset for GLUE as a tf.data.Dataset instance
train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=128, task='mrpc')
valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, max_length=128, task='mrpc')
train_dataset = train_dataset.shuffle(100).batch(32).repeat(2)
valid_dataset = valid_dataset.batch(64)

# Prepare training: Compile tf.keras model with optimizer, loss and learning rate schedule
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
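# A sketch of the training steps elided by the hunk above, assuming the usual
# tf.keras workflow: compile the model, fit it on the prepared datasets, then
# save it so it can be reloaded in PyTorch below. The epoch/step counts here
# are illustrative assumptions only.
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
model.fit(train_dataset, epochs=2, steps_per_epoch=115,
          validation_data=valid_dataset, validation_steps=7)
model.save_pretrained('./save/')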
@@ -231,22 +288,29 @@ pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf
sentence_0 = "This research was consistent with his findings."
sentence_1 = "His findings were compatible with this research."
sentence_2 = "His findings were not compatible with this research."
inputs_1 = tokenizer(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt')
inputs_2 = tokenizer(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt')

pred_1 = pytorch_model(**inputs_1)[0].argmax().item()
pred_2 = pytorch_model(**inputs_2)[0].argmax().item()
print("sentence_1 is", "a paraphrase" if pred_1 else "not a paraphrase", "of sentence_0")
print("sentence_2 is", "a paraphrase" if pred_2 else "not a paraphrase", "of sentence_0")
```

## Quick tour of the fine-tuning/usage scripts

**Important**
Before running the fine-tuning scripts, please read the
[instructions](#run-the-examples) on how to
set up your environment to run the examples.

The library comprises several example scripts with SOTA performances for NLU and NLG tasks:

- `run_glue.py`: an example fine-tuning sequence classification models on nine different GLUE tasks (*sequence-level classification*)
- `run_squad.py`: an example fine-tuning question answering models on the question answering dataset SQuAD 2.0 (*token-level classification*)
- `run_ner.py`: an example fine-tuning token classification models on named entity recognition (*token-level classification*)
- `run_generation.py`: an example using GPT, GPT-2, CTRL, Transformer-XL and XLNet for conditional language generation
- other model-specific examples (see the documentation).

Here are three quick usage examples for these scripts:

@@ -255,7 +319,7 @@ Here are three quick usage examples for these scripts:

The [General Language Understanding Evaluation (GLUE) benchmark](https://gluebenchmark.com/) is a collection of nine sentence- or sentence-pair language understanding tasks for evaluating and analyzing natural language understanding systems.

Before running any of these GLUE tasks you should download the
[GLUE data](https://gluebenchmark.com/tasks) by running
[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
and unpack it to some directory `$GLUE_DIR`.

@@ -270,17 +334,15 @@ pip install -r ./examples/requirements.txt
export GLUE_DIR=/path/to/glue
export TASK_NAME=MRPC

python ./examples/text-classification/run_glue.py \
    --model_name_or_path bert-base-uncased \
    --task_name $TASK_NAME \
    --do_train \
    --do_eval \
    --do_lower_case \
    --data_dir $GLUE_DIR/$TASK_NAME \
    --max_seq_length 128 \
    --per_device_eval_batch_size=8 \
    --per_device_train_batch_size=8 \
    --learning_rate 2e-5 \
    --num_train_epochs 3.0 \
    --output_dir /tmp/$TASK_NAME/
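
Once training finishes, the fine-tuned weights and tokenizer files are written to `--output_dir` and can be reloaded like any pretrained checkpoint. A minimal sketch, assuming the MRPC run above completed and wrote to `/tmp/MRPC/`:

```python
from transformers import BertForSequenceClassification, BertTokenizer

# /tmp/MRPC/ is the output_dir from the command above (an assumption for this sketch)
model = BertForSequenceClassification.from_pretrained('/tmp/MRPC/')
tokenizer = BertTokenizer.from_pretrained('/tmp/MRPC/')
```
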
@@ -298,8 +360,7 @@ Parallel training is a simple way to use several GPUs (but is slower and less fl

```shell
export GLUE_DIR=/path/to/glue

python ./examples/text-classification/run_glue.py \
    --model_name_or_path xlnet-large-cased \
    --do_train \
    --do_eval \
@@ -307,8 +368,8 @@ python ./examples/run_glue.py \
    --data_dir=${GLUE_DIR}/STS-B \
    --output_dir=./proc_data/sts-b-110 \
    --max_seq_length=128 \
    --per_device_eval_batch_size=8 \
    --per_device_train_batch_size=8 \
    --gradient_accumulation_steps=1 \
    --max_steps=1200 \
    --model_name=xlnet-large-cased \
@@ -324,17 +385,15 @@ On this machine we thus have a batch size of 32, please increase `gradient_accum

This example code fine-tunes the Bert Whole Word Masking model on the Microsoft Research Paraphrase Corpus (MRPC) corpus using distributed training on 8 V100 GPUs to reach an F1 > 92.

```bash
python -m torch.distributed.launch --nproc_per_node 8 ./examples/text-classification/run_glue.py \
    --model_name_or_path bert-large-uncased-whole-word-masking \
    --task_name MRPC \
    --do_train \
    --do_eval \
    --do_lower_case \
    --data_dir $GLUE_DIR/MRPC/ \
    --max_seq_length 128 \
    --per_device_eval_batch_size=8 \
    --per_device_train_batch_size=8 \
    --learning_rate 2e-5 \
    --num_train_epochs 3.0 \
    --output_dir /tmp/mrpc_output/ \
@@ -358,12 +417,11 @@ Training with these hyper-parameters gave us the following results:

This example code fine-tunes BERT on the SQuAD dataset using distributed training on 8 V100 GPUs and the Bert Whole Word Masking uncased model to reach an F1 > 93 on SQuAD:

```bash
python -m torch.distributed.launch --nproc_per_node=8 ./examples/question-answering/run_squad.py \
    --model_type bert \
    --model_name_or_path bert-large-uncased-whole-word-masking \
    --do_train \
    --do_eval \
    --do_lower_case \
    --train_file $SQUAD_DIR/train-v1.1.json \
    --predict_file $SQUAD_DIR/dev-v1.1.json \
    --learning_rate 3e-5 \
@@ -371,8 +429,8 @@ python -m torch.distributed.launch --nproc_per_node=8 ./examples/run_squad.py \
    --max_seq_length 384 \
    --doc_stride 128 \
    --output_dir ../models/wwm_uncased_finetuned_squad/ \
    --per_device_eval_batch_size=3 \
    --per_device_train_batch_size=3 \
```

Training with these hyper-parameters gave us the following results:
@@ -384,20 +442,120 @@ python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ../models/wwm_uncase

This is the model provided as `bert-large-uncased-whole-word-masking-finetuned-squad`.

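As a quick sanity check (a minimal sketch, not part of the example scripts, with a toy question/context pair), the resulting checkpoint can be queried directly from Python:

```python
import torch
from transformers import BertForQuestionAnswering, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')

question = "What does SQuAD stand for?"
context = "SQuAD stands for Stanford Question Answering Dataset."
inputs = tokenizer.encode_plus(question, context, add_special_tokens=True, return_tensors='pt')

start_scores, end_scores = model(**inputs)  # logits for answer start and end positions
answer_ids = inputs['input_ids'][0][torch.argmax(start_scores): torch.argmax(end_scores) + 1]
print(tokenizer.decode(answer_ids.tolist()))
```
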
### `run_generation.py`: Text generation with GPT, GPT-2, CTRL, Transformer-XL and XLNet

A conditional generation script is also included to generate text from a prompt.
The generation script includes the [tricks](https://github.com/rusiaaman/XLNet-gen#methodology) proposed by Aman Rusia to get high-quality generation with memory models like Transformer-XL and XLNet (include a predefined text to make short inputs longer).

Here is how to run the script with the small version of the OpenAI GPT-2 model:

```shell
python ./examples/text-generation/run_generation.py \
    --model_type=gpt2 \
    --length=20 \
    --model_name_or_path=gpt2 \
```

and from the Salesforce CTRL model:
```shell
python ./examples/text-generation/run_generation.py \
    --model_type=ctrl \
    --length=20 \
    --model_name_or_path=ctrl \
    --temperature=0 \
    --repetition_penalty=1.2 \
```
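
The same kind of sampling can also be done directly from Python with the `generate()` method (a minimal sketch; the prompt and sampling parameters are arbitrary choices for illustration):

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')

input_ids = tokenizer.encode("The Hugging Face Transformers library", return_tensors='pt')
# Sample roughly 20 new tokens on top of the prompt
output_ids = model.generate(input_ids, max_length=input_ids.shape[1] + 20,
                            do_sample=True, top_k=50, top_p=0.95)
print(tokenizer.decode(output_ids[0].tolist(), skip_special_tokens=True))
```
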
## Quick tour of model sharing

Starting with `v2.2.2`, you can now upload and share your fine-tuned models with the community, using the <abbr title="Command-line interface">CLI</abbr> that's built into the library.

**First, create an account on [https://huggingface.co/join](https://huggingface.co/join)**. Optionally, join an existing organization or create a new one. Then:

```shell
transformers-cli login
# log in using the same credentials as on huggingface.co
```
Upload your model:
```shell
transformers-cli upload ./path/to/pretrained_model/

# ^^ Upload folder containing weights/tokenizer/config
# saved via `.save_pretrained()`

transformers-cli upload ./config.json [--filename folder/foobar.json]

# ^^ Upload a single file
# (you can optionally override its filename, which can be nested inside a folder)
```

If you want your model to be namespaced by your organization name rather than your username, add the following flag to any command:
```shell
--organization organization_name
```

Your model will then be accessible through its identifier, a concatenation of your username (or organization name) and the folder name above:
```python
"username/pretrained_model"
# or if an org:
"organization_name/pretrained_model"
```

**Please add a README.md model card** to the repo under `model_cards/` with: model description, training params (dataset, preprocessing, hardware used, hyperparameters), evaluation results, intended uses & limitations, etc.

Your model now has a page on huggingface.co/models 🔥

Anyone can load it from code:
```python
tokenizer = AutoTokenizer.from_pretrained("namespace/pretrained_model")
model = AutoModel.from_pretrained("namespace/pretrained_model")
```

List all your files on S3:
```shell
transformers-cli s3 ls
```

You can also delete unneeded files:

```shell
transformers-cli s3 rm …
```

## Quick tour of pipelines

New in version `v2.3`: a `Pipeline` is a high-level object which automatically handles tokenization, runs your data through a transformers model
and outputs the result in a structured object.

You can create `Pipeline` objects for the following down-stream tasks:

- `feature-extraction`: Generates a tensor representation for the input sequence
- `ner`: Generates named entity mapping for each word in the input sequence.
- `sentiment-analysis`: Gives the polarity (positive / negative) of the whole input sequence.
- `text-classification`: Initialize a `TextClassificationPipeline` directly, or see `sentiment-analysis` for an example.
- `question-answering`: Provided some context and a question referring to the context, it will extract the answer to the question in the context.
- `fill-mask`: Takes an input sequence containing a masked token (e.g. `<mask>`) and returns a list of the most probable filled sequences, with their probabilities.
- `summarization`
- `translation_xx_to_yy`

```python
>>> from transformers import pipeline

# Allocate a pipeline for sentiment-analysis
>>> nlp = pipeline('sentiment-analysis')
>>> nlp('We are very happy to include pipeline into the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9978193640708923}]

# Allocate a pipeline for question-answering
>>> nlp = pipeline('question-answering')
>>> nlp({
...     'question': 'What is the name of the repository ?',
...     'context': 'Pipeline have been included in the huggingface/transformers repository'
... })
{'score': 0.5135612454720828, 'start': 35, 'end': 59, 'answer': 'huggingface/transformers'}
```
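
The other tasks follow the same pattern. For instance, a `fill-mask` pipeline (a minimal sketch; the default model it downloads and the exact scores will vary):

```python
>>> from transformers import pipeline

# Allocate a pipeline for masked-language-model completion
>>> nlp = pipeline('fill-mask')
>>> nlp(f"HuggingFace is creating a {nlp.tokenizer.mask_token} that the community uses to solve NLP tasks.")
# returns a list of dicts with 'sequence', 'score' and 'token' for the top candidates
```
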
## Migrating from pytorch-transformers to transformers

Here is a quick summary of what you should take care of when migrating from `pytorch-transformers` to `transformers`.
@@ -417,9 +575,9 @@ Here is a quick summary of what you should take care of when migrating from `pyt

### Models always output `tuples`

The main breaking change when migrating from `pytorch-pretrained-bert` to `transformers` is that every model's forward method always outputs a `tuple` with various elements depending on the model and the configuration parameters.

The exact content of the tuples for each model is detailed in the models' docstrings and the [documentation](https://huggingface.co/transformers/).

In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in `pytorch-pretrained-bert`.

@@ -445,13 +603,17 @@ outputs = model(input_ids, labels=labels)
loss, logits, attentions = outputs
```

### Using hidden states

By enabling the configuration option `output_hidden_states`, it was possible to retrieve the last hidden states of the encoder. In `pytorch-transformers` as well as `transformers` the return value has changed slightly: `all_hidden_states` now also includes the hidden state of the embeddings in addition to those of the encoding layers. This allows users to easily access the final state of the embeddings.

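For example (a minimal sketch, assuming a BERT base checkpoint), the embeddings output is the first element of `all_hidden_states`:

```python
import torch
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)

input_ids = torch.tensor([tokenizer.encode("Hello world", add_special_tokens=True)])
all_hidden_states = model(input_ids)[-1]   # tuple of (num_layers + 1) tensors

embedding_output = all_hidden_states[0]    # hidden state of the embeddings
encoder_outputs = all_hidden_states[1:]    # one tensor per encoder layer
```
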
### Serialization

Breaking change in the `from_pretrained()` method:

1. Models are now set in evaluation mode by default when instantiated with the `from_pretrained()` method. To train them, don't forget to set them back in training mode (`model.train()`) to activate the dropout modules (see the short sketch after this list).

2. The additional `*input` and `**kwargs` arguments supplied to the `from_pretrained()` method used to be directly passed to the underlying model's class `__init__()` method. They are now used to update the model configuration attribute instead, which can break derived model classes built based on the previous `BertForSequenceClassification` examples. We are working on a way to mitigate this breaking change in [#866](https://github.com/huggingface/transformers/pull/866) by forwarding to the model's `__init__()` method (i) the provided positional arguments and (ii) the keyword arguments which do not match any configuration class attributes.

Also, while not a breaking change, the serialization methods have been standardized and you probably should switch to the new method `save_pretrained(save_directory)` if you were using any other serialization method before.

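To illustrate point 1 (a minimal sketch):

```python
from transformers import BertForSequenceClassification

model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
assert not model.training   # from_pretrained() returns the model in eval mode

model.train()               # switch dropout back on before fine-tuning
# ... fine-tune ...
model.eval()                # and back to eval mode for inference
```
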

@@ -496,12 +658,12 @@ Here is a conversion examples from `BertAdam` with a linear warmup and decay sch
# Parameters:
lr = 1e-3
max_grad_norm = 1.0
num_training_steps = 1000
num_warmup_steps = 100
warmup_proportion = float(num_warmup_steps) / float(num_training_steps)  # 0.1

### Previously BertAdam optimizer was instantiated like this:
optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, t_total=num_training_steps)
### and used like this:
for batch in train_data:
    loss = model(batch)
@@ -510,9 +672,10 @@ for batch in train_data:

### In Transformers, optimizer and schedules are split and instantiated like this:
optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False)  # To reproduce BertAdam specific behavior set correct_bias=False
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)  # PyTorch scheduler
### and used like this:
for batch in train_data:
    model.train()
    loss = model(batch)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)  # Gradient clipping is not in AdamW anymore (so you can use amp without issue)
@@ -523,4 +686,13 @@ for batch in train_data:

## Citation

We now have a paper you can cite for the 🤗 Transformers library:
```bibtex
@article{Wolf2019HuggingFacesTS,
  title={HuggingFace's Transformers: State-of-the-art Natural Language Processing},
  author={Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and R'emi Louf and Morgan Funtowicz and Jamie Brew},
  journal={ArXiv},
  year={2019},
  volume={abs/1910.03771}
}
```

codecov.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
coverage:
  status:
    project:
      default:
        informational: true
    patch: off
deploy_multi_version_doc.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
cd docs

function deploy_doc(){
    echo "Creating doc at commit $1 and pushing to folder $2"
    git checkout $1
    if [ ! -z "$2" ]
    then
        echo "Pushing version" $2
        make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html $doc:$dir/$2
    else
        echo "Pushing master"
        make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir
    fi
}

deploy_doc "master"
deploy_doc "b33a385" v1.0.0
deploy_doc "fe02e45" v1.1.0
deploy_doc "89fd345" v1.2.0
deploy_doc "fc9faa8" v2.0.0
deploy_doc "3ddce1d" v2.1.1
deploy_doc "f2f3294" v2.2.0
deploy_doc "d0f8b9a" v2.3.0
@ -1,7 +0,0 @@
|
||||
FROM pytorch/pytorch:latest
|
||||
|
||||
RUN git clone https://github.com/NVIDIA/apex.git && cd apex && python setup.py install --cuda_ext --cpp_ext
|
||||
|
||||
RUN pip install transformers
|
||||
|
||||
WORKDIR /workspace
|
||||
26
docker/transformers-cpu/Dockerfile
Normal file
26
docker/transformers-cpu/Dockerfile
Normal file
@ -0,0 +1,26 @@
|
||||
FROM ubuntu:18.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
LABEL repository="transformers"
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y bash \
|
||||
build-essential \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip && \
|
||||
rm -rf /var/lib/apt/lists
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
jupyter \
|
||||
tensorflow-cpu \
|
||||
torch
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY . transformers/
|
||||
RUN cd transformers/ && \
|
||||
python3 -m pip install --no-cache-dir .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
26
docker/transformers-gpu/Dockerfile
Normal file
26
docker/transformers-gpu/Dockerfile
Normal file
@ -0,0 +1,26 @@
|
||||
FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
LABEL repository="transformers"
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y bash \
|
||||
build-essential \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip && \
|
||||
rm -rf /var/lib/apt/lists
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
jupyter \
|
||||
tensorflow \
|
||||
torch
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY . transformers/
|
||||
RUN cd transformers/ && \
|
||||
python3 -m pip install --no-cache-dir .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
25
docker/transformers-pytorch-cpu/Dockerfile
Normal file
25
docker/transformers-pytorch-cpu/Dockerfile
Normal file
@ -0,0 +1,25 @@
|
||||
FROM ubuntu:18.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
LABEL repository="transformers"
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y bash \
|
||||
build-essential \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip && \
|
||||
rm -rf /var/lib/apt/lists
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
jupyter \
|
||||
torch
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY . transformers/
|
||||
RUN cd transformers/ && \
|
||||
python3 -m pip install --no-cache-dir .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
25
docker/transformers-pytorch-gpu/Dockerfile
Normal file
25
docker/transformers-pytorch-gpu/Dockerfile
Normal file
@ -0,0 +1,25 @@
|
||||
FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
LABEL repository="transformers"
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y bash \
|
||||
build-essential \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip && \
|
||||
rm -rf /var/lib/apt/lists
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
mkl \
|
||||
torch
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY . transformers/
|
||||
RUN cd transformers/ && \
|
||||
python3 -m pip install --no-cache-dir .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
25
docker/transformers-tensorflow-cpu/Dockerfile
Normal file
25
docker/transformers-tensorflow-cpu/Dockerfile
Normal file
@ -0,0 +1,25 @@
|
||||
FROM ubuntu:18.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
LABEL repository="transformers"
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y bash \
|
||||
build-essential \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip && \
|
||||
rm -rf /var/lib/apt/lists
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
mkl \
|
||||
tensorflow-cpu
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY . transformers/
|
||||
RUN cd transformers/ && \
|
||||
python3 -m pip install --no-cache-dir .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
25
docker/transformers-tensorflow-gpu/Dockerfile
Normal file
25
docker/transformers-tensorflow-gpu/Dockerfile
Normal file
@ -0,0 +1,25 @@
|
||||
FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
LABEL repository="transformers"
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y bash \
|
||||
build-essential \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip && \
|
||||
rm -rf /var/lib/apt/lists
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
mkl \
|
||||
tensorflow
|
||||
|
||||
WORKDIR /workspace
|
||||
COPY . transformers/
|
||||
RUN cd transformers/ && \
|
||||
python3 -m pip install --no-cache-dir .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
172
docs/README.md
172
docs/README.md
@ -1,25 +1,33 @@
|
||||
# Generating the documentation
|
||||
|
||||
To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
|
||||
you can install them using:
|
||||
you can install them with the following command, at the root of the code repository:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip install -e ".[docs]"
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
|
||||
check how they look like before committing for instance). You don't have to commit the built documentation.
|
||||
|
||||
---
|
||||
|
||||
## Packages installed
|
||||
|
||||
Here's an overview of all the packages installed. If you ran the previous command installing all packages from
|
||||
Here's an overview of all the packages installed. If you ran the previous command installing all packages from
|
||||
`requirements.txt`, you do not need to run the following commands.
|
||||
|
||||
Building it requires the package `sphinx` that you can
|
||||
Building it requires the package `sphinx` that you can
|
||||
install using:
|
||||
|
||||
```bash
|
||||
pip install -U sphinx
|
||||
```
|
||||
|
||||
You would also need the custom installed [theme](https://github.com/readthedocs/sphinx_rtd_theme) by
|
||||
You would also need the custom installed [theme](https://github.com/readthedocs/sphinx_rtd_theme) by
|
||||
[Read The Docs](https://readthedocs.org/). You can install it using the following command:
|
||||
|
||||
```bash
|
||||
@ -34,23 +42,19 @@ pip install recommonmark
|
||||
|
||||
## Building the documentation
|
||||
|
||||
Make sure that there is a symlink from the `example` file (in /examples) inside the source folder. Run the following
|
||||
command to generate it:
|
||||
|
||||
```bash
|
||||
ln -s ../../examples/README.md source/examples.md
|
||||
```
|
||||
|
||||
Once you have setup `sphinx`, you can build the documentation by running the following command in the `/docs` folder:
|
||||
|
||||
```bash
|
||||
make html
|
||||
```
|
||||
|
||||
A folder called ``_build/html`` should have been created. You can now open the file ``_build/html/index.html`` in your
|
||||
browser.
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
If you are adding/removing elements from the toc-tree or from any strutural item, it is recommended to clean the build
|
||||
If you are adding/removing elements from the toc-tree or from any structural item, it is recommended to clean the build
|
||||
directory before rebuilding. Run the following command to clean and build:
|
||||
|
||||
```bash
|
||||
@ -65,3 +69,143 @@ It should build the static app that will be available under `/docs/_build/html`
|
||||
|
||||
Accepted files are reStructuredText (.rst) and Markdown (.md). Create a file with its extension and put it
|
||||
in the source directory. You can then link it to the toc-tree by putting the filename without the extension.
|
||||
|
||||
## Preview the documentation in a pull request
|
||||
|
||||
Once you have made your pull request, you can check what the documentation will look like after it's merged by
|
||||
following these steps:
|
||||
|
||||
- Look at the checks at the bottom of the conversation page of your PR (you may need to click on "show all checks" to
|
||||
expand them).
|
||||
- Click on "details" next to the `ci/circleci: build_doc` check.
|
||||
- In the new window, click on the "Artifacts" tab.
|
||||
- Locate the file "docs/_build/html/index.html" (or any specific page you want to check) and click on it to get a
|
||||
preview.
|
||||
|
||||
## Writing Documentation - Specification
|
||||
|
||||
The `huggingface/transformers` documentation follows the
|
||||
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style. It is
|
||||
mostly written in ReStructuredText
|
||||
([Sphinx simple documentation](https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html),
|
||||
[Sourceforge complete documentation](https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html))
|
||||
|
||||
### Adding a new section
|
||||
|
||||
A section is a page held in the `Notes` toc-tree on the documentation. Adding a new section is done in two steps:
|
||||
|
||||
- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
|
||||
- Link that file in `./source/index.rst` on the correct toc-tree.
|
||||
|
||||
### Adding a new model
|
||||
|
||||
When adding a new model:
|
||||
|
||||
- Create a file `xxx.rst` under `./source/model_doc`.
|
||||
- Link that file in `./source/index.rst` on the `model_doc` toc-tree.
|
||||
- Write a short overview of the model:
|
||||
- Overview with paper & authors
|
||||
- Paper abstract
|
||||
- Tips and tricks and how to use it best
|
||||
- Add the classes that should be linked in the model. This generally includes the configuration, the tokenizer, and
|
||||
every model of that class (the base model, alongside models with additional heads), both in PyTorch and TensorFlow.
|
||||
The order is generally:
|
||||
- Configuration,
|
||||
- Tokenizer
|
||||
- PyTorch base model
|
||||
- PyTorch head models
|
||||
- TensorFlow base model
|
||||
- TensorFlow head models
|
||||
|
||||
These classes should be added using the RST syntax. Usually as follows:
|
||||
```
|
||||
XXXConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XXXConfig
|
||||
:members:
|
||||
```
|
||||
|
||||
This will include every public method of the configuration. If for some reason you wish for a method not to be
|
||||
displayed in the documentation, you can do so by specifying which methods should be in the docs:
|
||||
|
||||
```
|
||||
XXXTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XXXTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
```
|
||||
|
||||
### Writing source documentation
|
||||
|
||||
Values that should be put in `code` should either be surrounded by double backticks: \`\`like so\`\` or be written as
|
||||
an object using the :obj: syntax: :obj:\`like so\`.
|
||||
|
||||
When mentioning a class, it is recommended to use the :class: syntax as the mentioned class will be automatically
|
||||
linked by Sphinx: :class:\`transformers.XXXClass\`
|
||||
|
||||
When mentioning a function, it is recommended to use the :func: syntax as the mentioned method will be automatically
|
||||
linked by Sphinx: :func:\`transformers.XXXClass.method\`
|
||||
|
||||
Links should be done as so (note the double underscore at the end): \`text for the link <./local-link-or-global-link#loc>\`__
|
||||
|
||||
#### Defining arguments in a method
|
||||
|
||||
Arguments should be defined with the `Args:` prefix, followed by a line return and an indentation.
|
||||
The argument should be followed by its type, with its shape if it is a tensor, and a line return.
|
||||
Another indentation is necessary before writing the description of the argument.
|
||||
|
||||
Here's an example showcasing everything so far:
|
||||
|
||||
```
|
||||
Args:
|
||||
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
|
||||
Indices of input sequence tokens in the vocabulary.
|
||||
|
||||
Indices can be obtained using :class:`transformers.AlbertTokenizer`.
|
||||
See :func:`transformers.PreTrainedTokenizer.encode` and
|
||||
:func:`transformers.PreTrainedTokenizer.__call__` for details.
|
||||
|
||||
`What are input IDs? <../glossary.html#input-ids>`__
|
||||
```
|
||||
|
||||
#### Writing a multi-line code block
|
||||
|
||||
Multi-line code blocks can be useful for displaying examples. They are done like so:
|
||||
|
||||
```
|
||||
Example::
|
||||
|
||||
# first line of code
|
||||
# second line
|
||||
# etc
|
||||
```
|
||||
|
||||
The `Example` string at the beginning can be replaced by anything as long as it is followed by two colons.
|
||||
|
||||
#### Writing a return block
|
||||
|
||||
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
|
||||
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
|
||||
building the return.
|
||||
|
||||
Here's an example for tuple return, comprising several objects:
|
||||
|
||||
```
|
||||
Returns:
|
||||
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
|
||||
loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
|
||||
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
|
||||
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
|
||||
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
||||
```
|
||||
|
||||
Here's an example for a single value return:
|
||||
|
||||
```
|
||||
Returns:
|
||||
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
||||
```
|
||||
|
||||
@ -1,29 +0,0 @@
|
||||
alabaster==0.7.12
|
||||
Babel==2.7.0
|
||||
certifi==2019.6.16
|
||||
chardet==3.0.4
|
||||
commonmark==0.9.0
|
||||
docutils==0.14
|
||||
future==0.17.1
|
||||
idna==2.8
|
||||
imagesize==1.1.0
|
||||
Jinja2==2.10.1
|
||||
MarkupSafe==1.1.1
|
||||
packaging==19.0
|
||||
Pygments==2.4.2
|
||||
pyparsing==2.4.0
|
||||
pytz==2019.1
|
||||
recommonmark==0.5.0
|
||||
requests==2.22.0
|
||||
six==1.12.0
|
||||
snowballstemmer==1.9.0
|
||||
Sphinx==2.1.2
|
||||
sphinx-rtd-theme==0.4.3
|
||||
sphinxcontrib-applehelp==1.0.1
|
||||
sphinxcontrib-devhelp==1.0.1
|
||||
sphinxcontrib-htmlhelp==1.0.2
|
||||
sphinxcontrib-jsmath==1.0.1
|
||||
sphinxcontrib-qthelp==1.0.2
|
||||
sphinxcontrib-serializinghtml==1.1.3
|
||||
urllib3==1.25.3
|
||||
sphinx-markdown-tables==0.0.9
|
||||
@ -9,4 +9,8 @@
|
||||
|
||||
.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow {
|
||||
color: #6670FF;
|
||||
}
|
||||
|
||||
.highlight .gp {
|
||||
color: #FB8D68;
|
||||
}
|
||||
@ -1,4 +1,71 @@
|
||||
huggingface.css
|
||||
/* Our DOM objects */
|
||||
|
||||
/* Version control */
|
||||
|
||||
.version-button {
|
||||
background-color: #6670FF;
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 5px;
|
||||
font-size: 15px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.version-button:hover, .version-button:focus {
|
||||
background-color: #A6B0FF;
|
||||
}
|
||||
|
||||
.version-dropdown {
|
||||
display: none;
|
||||
background-color: #6670FF;
|
||||
min-width: 160px;
|
||||
overflow: auto;
|
||||
font-size: 15px;
|
||||
}
|
||||
|
||||
.version-dropdown a {
|
||||
color: white;
|
||||
padding: 3px 4px;
|
||||
text-decoration: none;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.version-dropdown a:hover {
|
||||
background-color: #A6B0FF;
|
||||
}
|
||||
|
||||
.version-show {
|
||||
display: block;
|
||||
}
|
||||
|
||||
/* Framework selector */
|
||||
|
||||
.framework-selector {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
justify-content: flex-end;
|
||||
margin-right: 30px;
|
||||
}
|
||||
|
||||
.framework-selector > button {
|
||||
background-color: white;
|
||||
color: #6670FF;
|
||||
border: 1px solid #6670FF;
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
.framework-selector > button.selected{
|
||||
background-color: #6670FF;
|
||||
color: white;
|
||||
border: 1px solid #6670FF;
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
/* Copy button */
|
||||
|
||||
a.copybtn {
|
||||
margin: 3px;
|
||||
}
|
||||
|
||||
/* The literal code blocks */
|
||||
.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
|
||||
@ -18,6 +85,7 @@ huggingface.css
|
||||
|
||||
/* The research field on top of the toc tree */
|
||||
.wy-side-nav-search{
|
||||
padding-top: 0;
|
||||
background-color: #6670FF;
|
||||
}
|
||||
|
||||
@ -44,11 +112,11 @@ huggingface.css
|
||||
/* The text items on the toc tree */
|
||||
.wy-menu-vertical a {
|
||||
color: #FFFFDD;
|
||||
font-family: Calibre-Light;
|
||||
font-family: Calibre-Light, sans-serif;
|
||||
}
|
||||
.wy-menu-vertical header, .wy-menu-vertical p.caption{
|
||||
color: white;
|
||||
font-family: Calibre-Light;
|
||||
font-family: Calibre-Light, sans-serif;
|
||||
}
|
||||
|
||||
/* The color inside the selected toc tree block */
|
||||
@ -85,7 +153,7 @@ a {
|
||||
border-right: solid 2px #FB8D68;
|
||||
border-left: solid 2px #FB8D68;
|
||||
color: #FB8D68;
|
||||
font-family: Calibre-Light;
|
||||
font-family: Calibre-Light, sans-serif;
|
||||
border-top: none;
|
||||
font-style: normal !important;
|
||||
}
|
||||
@ -136,14 +204,14 @@ a {
|
||||
|
||||
/* class and method names in doc */
|
||||
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
|
||||
font-family: Calibre;
|
||||
font-family: Calibre, sans-serif;
|
||||
font-size: 20px !important;
|
||||
}
|
||||
|
||||
/* class name in doc*/
|
||||
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
|
||||
margin-right: 10px;
|
||||
font-family: Calibre-Medium;
|
||||
font-family: Calibre-Medium, sans-serif;
|
||||
}
|
||||
|
||||
/* Method and class parameters */
|
||||
@ -160,17 +228,17 @@ a {
|
||||
|
||||
/* FONTS */
|
||||
body{
|
||||
font-family: Calibre;
|
||||
font-family: Calibre, sans-serif;
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-family: Calibre-Thin;
|
||||
font-family: Calibre-Thin, sans-serif;
|
||||
font-size: 70px;
|
||||
}
|
||||
|
||||
h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
|
||||
font-family: Calibre-Medium;
|
||||
font-family: Calibre-Medium, sans-serif;
|
||||
}
|
||||
|
||||
@font-face {
|
||||
@ -197,3 +265,40 @@ h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
|
||||
font-weight:400;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Nav Links to other parts of huggingface.co
|
||||
*/
|
||||
div.menu {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 0;
|
||||
padding-top: 20px;
|
||||
padding-right: 20px;
|
||||
z-index: 1000;
|
||||
}
|
||||
div.menu a {
|
||||
font-size: 14px;
|
||||
letter-spacing: 0.3px;
|
||||
text-transform: uppercase;
|
||||
color: white;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);
|
||||
padding: 10px 16px 6px 16px;
|
||||
border-radius: 3px;
|
||||
margin-left: 12px;
|
||||
position: relative;
|
||||
}
|
||||
div.menu a:active {
|
||||
top: 1px;
|
||||
}
|
||||
@media (min-width: 768px) and (max-width: 1750px) {
|
||||
.wy-breadcrumbs {
|
||||
margin-top: 32px;
|
||||
}
|
||||
}
|
||||
@media (max-width: 768px) {
|
||||
div.menu {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
322
docs/source/benchmarks.rst
Normal file
322
docs/source/benchmarks.rst
Normal file
@ -0,0 +1,322 @@
|
||||
Benchmarks
|
||||
==========
|
||||
|
||||
Let's take a look at how 🤗 Transformer models can be benchmarked, best practices, and already available benchmarks.
|
||||
|
||||
A notebook explaining in more detail how to benchmark 🤗 Transformer models can be found `here <https://github.com/huggingface/transformers/blob/master/notebooks/05-benchmark.ipynb>`__.
|
||||
|
||||
How to benchmark 🤗 Transformer models
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The classes :class:`~transformers.PyTorchBenchmark` and :class:`~transformers.TensorFlowBenchmark` allow you to flexibly benchmark 🤗 Transformer models.
|
||||
The benchmark classes allow us to measure the `peak memory usage` and `required time` for both
|
||||
`inference` and `training`.
|
||||
|
||||
.. note::
|
||||
|
||||
Here, `inference` is defined as a single forward pass, and `training` as a single forward pass and backward pass.
|
||||
|
||||
The benchmark classes :class:`~transformers.PyTorchBenchmark` and :class:`~transformers.TensorFlowBenchmark` expect an object of type :class:`~transformers.PyTorchBenchmarkArguments` and :class:`~transformers.TensorFlowBenchmarkArguments`, respectively, for instantiation. :class:`~transformers.PyTorchBenchmarkArguments` and :class:`~transformers.TensorFlowBenchmarkArguments` are data classes and contain all relevant configurations for their corresponding benchmark class.
|
||||
In the following example, it is shown how a BERT model of type `bert-base-cased` can be benchmarked.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
|
||||
|
||||
Here, three arguments are given to the benchmark argument data classes, namely ``models``, ``batch_sizes``, and ``sequence_lengths``. The argument ``models`` is required and expects a :obj:`list` of model identifiers from the `model hub <https://huggingface.co/models>`__
|
||||
The :obj:`list` arguments ``batch_sizes`` and ``sequence_lengths`` define the size of the ``input_ids`` on which the model is benchmarked.
|
||||
There are many more parameters that can be configured via the benchmark argument data classes. For more detail on these one can either directly consult the files
|
||||
``src/transformers/benchmark/benchmark_args_utils.py``, ``src/transformers/benchmark/benchmark_args.py`` (for PyTorch) and ``src/transformers/benchmark/benchmark_args_tf.py`` (for Tensorflow).
|
||||
Alternatively, running the following shell commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow respectively.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
python examples/benchmarking/run_benchmark.py --help
|
||||
|
||||
>>> ## TENSORFLOW CODE
|
||||
python examples/benchmarking/run_benchmark_tf.py --help
|
||||
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling ``benchmark.run()``.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base-uncased 8 8 0.006
|
||||
bert-base-uncased 8 32 0.006
|
||||
bert-base-uncased 8 128 0.018
|
||||
bert-base-uncased 8 512 0.088
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base-uncased 8 8 1227
|
||||
bert-base-uncased 8 32 1281
|
||||
bert-base-uncased 8 128 1307
|
||||
bert-base-uncased 8 512 1539
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 08:58:43.371351
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base-uncased 8 8 0.005
|
||||
bert-base-uncased 8 32 0.008
|
||||
bert-base-uncased 8 128 0.022
|
||||
bert-base-uncased 8 512 0.105
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base-uncased 8 8 1330
|
||||
bert-base-uncased 8 32 1330
|
||||
bert-base-uncased 8 128 1330
|
||||
bert-base-uncased 8 512 1770
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:26:35.617317
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
|
||||
By default, the `time` and the `required memory` for `inference` are benchmarked.
|
||||
In the example output above the first two sections show the result corresponding to `inference time` and `inference memory`.
|
||||
In addition, all relevant information about the computing environment, `e.g.` the GPU type, the system, the library versions, etc... are printed out in the third section under `ENVIRONMENT INFORMATION`.
|
||||
This information can optionally be saved in a `.csv` file when adding the argument :obj:`save_to_csv=True` to :class:`~transformers.PyTorchBenchmarkArguments` and :class:`~transformers.TensorFlowBenchmarkArguments` respectively.
|
||||
In this case, every section is saved in a separate `.csv` file. The path to each `.csv` file can optionally be defined via the argument data classes.
|
||||
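For instance, a minimal sketch (assuming the default output file names) that also writes the results to `.csv` files:

.. code-block::

    >>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    >>> args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32], save_to_csv=True)
    >>> benchmark = PyTorchBenchmark(args)
    >>> results = benchmark.run()
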
|
||||
Instead of benchmarking pre-trained models via their model identifier, `e.g.` `bert-base-uncased`, the user can alternatively benchmark an arbitrary configuration of any available model class.
|
||||
In this case, a :obj:`list` of configurations must be inserted with the benchmark args as follows.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 0.006
bert-base 8 32 0.006
|
||||
bert-base 8 128 0.018
|
||||
bert-base 8 512 0.088
|
||||
bert-384-hid 8 8 0.006
|
||||
bert-384-hid 8 32 0.006
|
||||
bert-384-hid 8 128 0.011
|
||||
bert-384-hid 8 512 0.054
|
||||
bert-6-lay 8 8 0.003
|
||||
bert-6-lay 8 32 0.004
|
||||
bert-6-lay 8 128 0.009
|
||||
bert-6-lay 8 512 0.044
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1277
|
||||
bert-base 8 32 1281
|
||||
bert-base 8 128 1307
|
||||
bert-base 8 512 1539
|
||||
bert-384-hid 8 8 1005
|
||||
bert-384-hid 8 32 1027
|
||||
bert-384-hid 8 128 1035
|
||||
bert-384-hid 8 512 1255
|
||||
bert-6-lay 8 8 1097
|
||||
bert-6-lay 8 32 1101
|
||||
bert-6-lay 8 128 1127
|
||||
bert-6-lay 8 512 1359
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:35:25.143267
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
|
||||
>>> ## TENSORFLOW CODE
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig

>>> args = TensorFlowBenchmarkArguments(models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)

>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name             Batch Size     Seq Length     Time in s
--------------------------------------------------------------------------------
bert-base              8                8            0.005
bert-base              8               32            0.008
bert-base              8              128            0.022
bert-base              8              512            0.106
bert-384-hid           8                8            0.005
bert-384-hid           8               32            0.007
bert-384-hid           8              128            0.018
bert-384-hid           8              512            0.064
bert-6-lay             8                8            0.002
bert-6-lay             8               32            0.003
bert-6-lay             8              128            0.0011
bert-6-lay             8              512            0.074
--------------------------------------------------------------------------------

==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name             Batch Size     Seq Length     Memory in MB
--------------------------------------------------------------------------------
bert-base              8                8            1330
bert-base              8               32            1330
bert-base              8              128            1330
bert-base              8              512            1770
bert-384-hid           8                8            1330
bert-384-hid           8               32            1330
bert-384-hid           8              128            1330
bert-384-hid           8              512            1540
bert-6-lay             8                8            1330
bert-6-lay             8               32            1330
bert-6-lay             8              128            1330
bert-6-lay             8              512            1540
--------------------------------------------------------------------------------

==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: Tensorflow
- use_xla: False
- framework_version: 2.2.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:38:15.487125
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False

Again, `inference time` and `required memory` for `inference` are measured, but this time for customized configurations of the :obj:`BertModel` class. This feature can be especially helpful when
deciding for which configuration the model should be trained.
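
If you want to keep such measurements around, for instance to compare several candidate configurations before training, the benchmark classes can also write their results to disk. The snippet below is only a sketch: it reuses the classes shown above, and it assumes the ``save_to_csv`` option is available in your version of the benchmark arguments.

::

>>> ## PYTORCH CODE
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig

>>> # same models/batch sizes/sequence lengths as above, but additionally dump the results to CSV files (sketch)
>>> args = PyTorchBenchmarkArguments(models=["bert-384-hid"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512], save_to_csv=True)
>>> benchmark = PyTorchBenchmark(args, configs=[BertConfig(hidden_size=384)])
>>> results = benchmark.run()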


Benchmark best practices
~~~~~~~~~~~~~~~~~~~~~~~~

This section lists a couple of best practices one should be aware of when benchmarking a model.

- Currently, only single-device benchmarking is supported. When benchmarking on GPU, it is recommended that the user
  specifies on which device the code should be run by setting the ``CUDA_VISIBLE_DEVICES`` environment variable in the shell, `e.g.` ``export CUDA_VISIBLE_DEVICES=0``, before running the code.
- The option :obj:`no_multi_processing` should only be set to :obj:`True` for testing and debugging. To ensure accurate memory measurement, it is recommended to run each memory benchmark in a separate process, which is the default behaviour as long as :obj:`no_multi_processing` is left as :obj:`False`.
- One should always state the environment information when sharing the results of a model benchmark. Results can vary heavily between GPU devices, library versions, etc., so benchmark results on their own are not very useful to the community.


Sharing your benchmark
~~~~~~~~~~~~~~~~~~~~~~

Previously, all available core models (10 at the time) were benchmarked for `inference time`, across many different settings: using PyTorch, with
and without TorchScript, using TensorFlow, with and without XLA. All of those tests were done across CPUs (except for
TensorFlow XLA) and GPUs.

The approach is detailed in the `following blogpost <https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2>`__ and the results are available `here <https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing>`__.

With the new `benchmark` tools, it is easier than ever to share your benchmark results with the community `here <https://github.com/huggingface/transformers/blob/master/examples/benchmarking/README.md>`__.
@ -8,11 +8,11 @@ There is a growing field of study concerned with investigating the inner working
* Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
* What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341

In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (https://arxiv.org/abs/1905.10650):

* accessing all the hidden-states of BERT/GPT/GPT-2,
* accessing all the attention weights for each head of BERT/GPT/GPT-2,
* retrieving the heads' output values and gradients to be able to compute the head importance score and prune heads as explained in https://arxiv.org/abs/1905.10650.

To help you understand and use these features, we have added a specific example script: `bertology.py <https://github.com/huggingface/transformers/blob/master/examples/run_bertology.py>`_ which extracts information from and prunes a model pre-trained on GLUE.
To help you understand and use these features, we have added a specific example script: `bertology.py <https://github.com/huggingface/transformers/blob/master/examples/bertology/run_bertology.py>`_ which extracts information from and prunes a model pre-trained on GLUE.

@ -14,19 +14,19 @@
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../src'))


# -- Project information -----------------------------------------------------

project = u'transformers'
copyright = u'2019, huggingface'
copyright = u'2020, huggingface'
author = u'huggingface'

# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'1.2.0'
release = u'3.0.0'


# -- General configuration ---------------------------------------------------
@ -44,7 +44,8 @@ extensions = [
    'sphinx.ext.napoleon',
    'recommonmark',
    'sphinx.ext.viewcode',
    'sphinx_markdown_tables'
    'sphinx_markdown_tables',
    'sphinx_copybutton'
]

# Add any paths that contain templates here, relative to this directory.
@ -74,6 +75,8 @@ exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# Remove the prompt when copying examples
copybutton_prompt_text = ">>> "

# -- Options for HTML output -------------------------------------------------

@ -105,6 +108,12 @@ html_static_path = ['_static']
#
# html_sidebars = {}

# This must be the name of an image file (path relative to the configuration
# directory) that is the favicon of the docs. Modern browsers use this as
# the icon for tabs, windows and bookmarks. It should be a Windows-style
# icon file (.ico).
html_favicon = 'favicon.ico'


# -- Options for HTMLHelp output ---------------------------------------------

@ -181,8 +190,8 @@ epub_title = project
epub_exclude_files = ['search.html']

def setup(app):
    app.add_stylesheet('css/huggingface.css')
    app.add_stylesheet('css/code-snippets.css')
    app.add_css_file('css/huggingface.css')
    app.add_css_file('css/code-snippets.css')
    app.add_js_file('js/custom.js')

# -- Extension configuration -------------------------------------------------

docs/source/contributing.md (symbolic link)
@ -0,0 +1 @@
../../CONTRIBUTING.md

@ -3,10 +3,16 @@ Converting Tensorflow Checkpoints

A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints into models that can then be loaded using the ``from_pretrained`` methods of the library.

.. note::
    Since 2.3.0 the conversion script is now part of the transformers CLI (**transformers-cli**),
    available in any transformers >= 2.3.0 installation.

    The documentation below reflects the **transformers-cli convert** command format.

BERT
^^^^

You can convert any TensorFlow checkpoint for BERT (in particular `the pre-trained models released by Google <https://github.com/google-research/bert#pre-trained-models>`_\ ) into a PyTorch save file by using the `convert_tf_checkpoint_to_pytorch.py <https://github.com/huggingface/transformers/blob/master/transformers/convert_tf_checkpoint_to_pytorch.py>`_ script.
You can convert any TensorFlow checkpoint for BERT (in particular `the pre-trained models released by Google <https://github.com/google-research/bert#pre-trained-models>`_\ ) into a PyTorch save file by using the `convert_bert_original_tf_checkpoint_to_pytorch.py <https://github.com/huggingface/transformers/blob/master/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py>`_ script.

This CLI takes as input a TensorFlow checkpoint (three files starting with ``bert_model.ckpt``\ ) and the associated configuration file (\ ``bert_config.json``\ ), creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint into the PyTorch model and saves the resulting model in a standard PyTorch save file that can be imported using ``torch.load()`` (see examples in `run_bert_extract_features.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_extract_features.py>`_\ , `run_bert_classifier.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_classifier.py>`_ and `run_bert_squad.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_squad.py>`_\ ).

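Once the conversion has finished, the dump can be sanity-checked directly from Python (an illustrative sketch only; the path stands in for the ``--pytorch_dump_output`` file created below):

::

>>> import torch

>>> # load the converted weights on CPU and peek at a few parameter names
>>> state_dict = torch.load("/path/to/bert/uncased_L-12_H-768_A-12/pytorch_model.bin", map_location="cpu")
>>> sorted(state_dict.keys())[:3]
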
@ -20,13 +26,33 @@ Here is an example of the conversion process for a pre-trained ``BERT-Base Uncas

  export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

  transformers bert \
    $BERT_BASE_DIR/bert_model.ckpt \
    $BERT_BASE_DIR/bert_config.json \
    $BERT_BASE_DIR/pytorch_model.bin
  transformers-cli convert --model_type bert \
    --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
    --config $BERT_BASE_DIR/bert_config.json \
    --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here <https://github.com/google-research/bert#pre-trained-models>`__.

ALBERT
^^^^^^

Convert TensorFlow model checkpoints of ALBERT to PyTorch using the `convert_albert_original_tf_checkpoint_to_pytorch.py <https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py>`_ script.

The CLI takes as input a TensorFlow checkpoint (three files starting with ``model.ckpt-best``\ ) and the accompanying configuration file (\ ``albert_config.json``\ ), then creates and saves a PyTorch model. To run this conversion you will need to have TensorFlow and PyTorch installed.

Here is an example of the conversion process for the pre-trained ``ALBERT Base`` model:

.. code-block:: shell

  export ALBERT_BASE_DIR=/path/to/albert/albert_base

  transformers-cli convert --model_type albert \
    --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
    --config $ALBERT_BASE_DIR/albert_config.json \
    --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here <https://github.com/google-research/albert#pre-trained-models>`__.

OpenAI GPT
^^^^^^^^^^

@ -36,10 +62,12 @@ Here is an example of the conversion process for a pre-trained OpenAI GPT model,

  export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

  transformers gpt \
    $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
    $PYTORCH_DUMP_OUTPUT \
    [OPENAI_GPT_CONFIG]
  transformers-cli convert --model_type gpt \
    --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
    --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
    [--config OPENAI_GPT_CONFIG] \
    [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \


OpenAI GPT-2
^^^^^^^^^^^^

@ -50,10 +78,11 @@ Here is an example of the conversion process for a pre-trained OpenAI GPT-2 mode

  export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights

  transformers gpt2 \
    $OPENAI_GPT2_CHECKPOINT_PATH \
    $PYTORCH_DUMP_OUTPUT \
    [OPENAI_GPT2_CONFIG]
  transformers-cli convert --model_type gpt2 \
    --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
    --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
    [--config OPENAI_GPT2_CONFIG] \
    [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]

Transformer-XL
^^^^^^^^^^^^^^

@ -64,27 +93,28 @@ Here is an example of the conversion process for a pre-trained Transformer-XL mo

  export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint

  transformers transfo_xl \
    $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
    $PYTORCH_DUMP_OUTPUT \
    [TRANSFO_XL_CONFIG]
  transformers-cli convert --model_type transfo_xl \
    --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
    --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
    [--config TRANSFO_XL_CONFIG] \
    [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]


XLNet
^^^^^

Here is an example of the conversion process for a pre-trained XLNet model, fine-tuned on STS-B using the TensorFlow script:
Here is an example of the conversion process for a pre-trained XLNet model:

.. code-block:: shell

  export XLNET_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
  export XLNET_CONFIG_PATH=/path/to/xlnet/config

  transformers xlnet \
    $XLNET_CHECKPOINT_PATH \
    $XLNET_CONFIG_PATH \
    $PYTORCH_DUMP_OUTPUT \
    STS-B \
  transformers-cli convert --model_type xlnet \
    --tf_checkpoint $XLNET_CHECKPOINT_PATH \
    --config $XLNET_CONFIG_PATH \
    --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
    [--finetuning_task_name XLNET_FINETUNED_TASK] \

XLM
^^^

@ -96,6 +126,8 @@ Here is an example of the conversion process for a pre-trained XLM model:

  export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

  transformers xlm \
    $XLM_CHECKPOINT_PATH \
    $PYTORCH_DUMP_OUTPUT \
  transformers-cli convert --model_type xlm \
    --tf_checkpoint $XLM_CHECKPOINT_PATH \
    --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
    [--config XLM_CONFIG] \
    [--finetuning_task_name XLM_FINETUNED_TASK]
docs/source/examples.md (symbolic link)
@ -0,0 +1 @@
../../examples/README.md

docs/source/favicon.ico (new binary file, 47 KiB; not shown)

docs/source/glossary.rst (new file, 238 lines)
@ -0,0 +1,238 @@
Glossary
^^^^^^^^

General terms
-------------

- autoencoding models: see MLM
- autoregressive models: see CLM
- CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the
  next word. It's usually done by reading the whole sentence but using a mask inside the model to hide the future
  tokens at a certain timestep.
- MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done
  by masking some tokens randomly, and has to predict the original text.
- multimodal: a task that combines texts with other kinds of inputs (for instance images).
- NLG: natural language generation, all tasks related to generating text (for instance talking with transformers,
  translation).
- NLP: natural language processing, a generic way to say "deal with texts".
- NLU: natural language understanding, all tasks related to understanding what is in a text (for instance classifying
  the whole text or individual words).
- pretrained model: a model that has been pretrained on some data (for instance all of Wikipedia). Pretraining methods
  involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or
  masking some words and trying to predict them (see MLM).
- RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts.
- seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or
  summarization models (such as :doc:`Bart </model_doc/bart>` or :doc:`T5 </model_doc/t5>`).
- token: a part of a sentence, usually a word, but can also be a subword (non-common words are often split in subwords)
  or a punctuation symbol.

Model inputs
------------

Every model is different yet bears similarities with the others. Therefore most models use the same inputs, which are
detailed here alongside usage examples.

.. _input-ids:

Input IDs
~~~~~~~~~

The input IDs are often the only required parameters to be passed to the model as input. *They are token indices,
numerical representations of tokens building the sequences that will be used as input by the model*.

Each tokenizer works differently but the underlying mechanism remains the same. Here's an example using the BERT
tokenizer, which is a `WordPiece <https://arxiv.org/pdf/1609.08144.pdf>`__ tokenizer:

::

>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

>>> sequence = "A Titan RTX has 24GB of VRAM"

The tokenizer takes care of splitting the sequence into tokens available in the tokenizer vocabulary.

::

>>> tokenized_sequence = tokenizer.tokenize(sequence)

The tokens are either words or subwords. Here for instance, "VRAM" wasn't in the model vocabulary, so it's been split
into "V", "RA" and "M". To indicate those tokens are not separate words but parts of the same word, a double-hash
prefix is added for "RA" and "M":

::

>>> print(tokenized_sequence)
['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M']

These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding
the sentence to the tokenizer, which leverages the Rust implementation of
`huggingface/tokenizers <https://github.com/huggingface/tokenizers>`__ for peak performance.

::

>>> encoded_sequence = tokenizer(sequence)["input_ids"]

The tokenizer returns a dictionary with all the arguments necessary for its corresponding model to work properly. The
token indices are under the key "input_ids":

::

>>> print(encoded_sequence)
[101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102]

Note that the tokenizer automatically adds "special tokens" (if the associated model relies on them), which are special
IDs the model sometimes uses. If we decode the previous sequence of ids,

::

>>> decoded_sequence = tokenizer.decode(encoded_sequence)

we will see

::

>>> print(decoded_sequence)
[CLS] A Titan RTX has 24GB of VRAM [SEP]

because this is the way a :class:`~transformers.BertModel` is going to expect its inputs.

.. _attention-mask:

Attention mask
~~~~~~~~~~~~~~

The attention mask is an optional argument used when batching sequences together. This argument indicates to the
model which tokens should be attended to, and which should not.

For example, consider these two sequences:

::

>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

>>> sequence_a = "This is a short sequence."
>>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A."

>>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"]
>>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"]

The encoded versions have different lengths:

::

>>> len(encoded_sequence_a), len(encoded_sequence_b)
(8, 19)

Therefore, we can't put them together in the same tensor as-is. The first sequence needs to be padded up to the length
of the second one, or the second one needs to be truncated down to the length of the first one.

In the first case, the list of IDs will be extended by the padding indices. We can pass a list to the tokenizer and ask
it to pad like this:

::

>>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True)

We can see that 0s have been added on the right of the first sentence to make it the same length as the second one:

::

>>> padded_sequences["input_ids"]
[[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]]

This can then be converted into a tensor in PyTorch or TensorFlow. The attention mask is a binary tensor indicating
the position of the padded indices so that the model does not attend to them. For the
:class:`~transformers.BertTokenizer`, :obj:`1` indicates a value that should be attended to while :obj:`0` indicates
a padded value. This attention mask is in the dictionary returned by the tokenizer under the key "attention_mask":

::

>>> padded_sequences["attention_mask"]
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
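
The mask can then be passed to the model together with the input IDs (an illustrative sketch; any PyTorch BERT checkpoint would do here):

::

>>> import torch
>>> from transformers import BertModel

>>> model = BertModel.from_pretrained("bert-base-cased")
>>> # the model ignores the padded positions thanks to the attention mask
>>> outputs = model(
...     input_ids=torch.tensor(padded_sequences["input_ids"]),
...     attention_mask=torch.tensor(padded_sequences["attention_mask"]),
... )
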
.. _token-type-ids:

Token Type IDs
~~~~~~~~~~~~~~

Some models' purpose is to do sequence classification or question answering. These require two different sequences to
be encoded in the same input IDs. They are usually separated by special tokens, such as the classifier and separator
tokens. For example, the BERT model builds its two-sequence input as such:

::

>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]

We can use our tokenizer to automatically generate such a sentence by passing the two sequences as two arguments (and
not a list like before) like this:

::

>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> sequence_a = "HuggingFace is based in NYC"
>>> sequence_b = "Where is HuggingFace based?"

>>> encoded_dict = tokenizer(sequence_a, sequence_b)
>>> decoded = tokenizer.decode(encoded_dict["input_ids"])

which will return:

::

>>> print(decoded)
[CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]

This is enough for some models to understand where one sequence ends and where another begins. However, other models,
such as BERT, have an additional mechanism: the token type IDs (also called segment IDs). They are a binary
mask identifying the different sequences in the model.

The tokenizer returns this mask in the dictionary under the key "token_type_ids":

::

>>> encoded_dict['token_type_ids']
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]

The first sequence, the "context" used for the question, has all its tokens represented by :obj:`0`, whereas the
question has all its tokens represented by :obj:`1`. Some models, like :class:`~transformers.XLNetModel`, use an
additional token represented by a :obj:`2`.
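
Since the tokenizer already returns the token type IDs alongside the input IDs, the whole dictionary can simply be converted to tensors and handed to the model (a sketch, reusing ``encoded_dict`` from above):

::

>>> import torch
>>> from transformers import BertModel

>>> model = BertModel.from_pretrained("bert-base-cased")
>>> # unsqueeze to add a batch dimension of 1; token_type_ids are picked up automatically
>>> inputs = {name: torch.tensor(value).unsqueeze(0) for name, value in encoded_dict.items()}
>>> outputs = model(**inputs)
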
.. _position-ids:

Position IDs
~~~~~~~~~~~~

The position IDs are used by the model to identify which token is at which position. Contrary to RNNs that have the
position of each token embedded within them, transformers are unaware of the position of each token. The position
IDs are created for this purpose.

They are an optional parameter. If no position IDs are passed to the model, they are automatically created as absolute
positional embeddings.

Absolute positional embeddings are selected in the range ``[0, config.max_position_embeddings - 1]``. Some models
use other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings.
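
If you do want to control them explicitly, absolute position IDs can be built with a simple range and passed to the model (an illustrative sketch, reusing ``encoded_sequence`` from the Input IDs section):

::

>>> import torch
>>> from transformers import BertModel

>>> model = BertModel.from_pretrained("bert-base-cased")
>>> input_ids = torch.tensor([encoded_sequence])
>>> # one position per token: [[0, 1, 2, ..., seq_len - 1]]
>>> position_ids = torch.arange(input_ids.size(1)).unsqueeze(0)
>>> outputs = model(input_ids, position_ids=position_ids)
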
.. _feed-forward-chunking:

Feed Forward Chunking
~~~~~~~~~~~~~~~~~~~~~

In transformers, two feed forward layers usually follow the self attention layer in each residual attention block.
The intermediate embedding size of the feed forward layers is often bigger than the hidden size of the model (e.g.,
3072 vs. 768 for ``bert-base-uncased``).

For an input of size ``[batch_size, sequence_length]``, the memory required to store the intermediate feed forward
embeddings ``[batch_size, sequence_length, config.intermediate_size]`` can account for a large fraction of the memory
use. The authors of `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451>`_ noticed that since the
computation is independent of the ``sequence_length`` dimension, it is mathematically equivalent to compute the output
embeddings of both feed forward layers ``[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n``
individually and concatenate them afterwards to ``[batch_size, sequence_length, config.hidden_size]`` with
``n = sequence_length``, which trades increased computation time against reduced memory use, but yields a
mathematically **equivalent** result.

For models employing the function :func:`~.transformers.apply_chunking_to_forward`, the ``chunk_size`` defines the
number of output embeddings that are computed in parallel and thus defines the trade-off between memory and time
complexity. If ``chunk_size`` is set to 0, no feed forward chunking is done.
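
The trade-off can be illustrated with a small self-contained PyTorch sketch (sizes here are the ``bert-base`` defaults; this is not the library's implementation, just the idea behind it):

::

>>> import torch
>>> import torch.nn as nn

>>> hidden_size, intermediate_size, chunk_size = 768, 3072, 64
>>> dense_in = nn.Linear(hidden_size, intermediate_size)
>>> dense_out = nn.Linear(intermediate_size, hidden_size)

>>> def feed_forward(x):
...     # the two feed forward layers of a transformer block
...     return dense_out(torch.relu(dense_in(x)))

>>> hidden_states = torch.randn(8, 512, hidden_size)
>>> # process the sequence dimension chunk by chunk, then concatenate: same result, smaller peak memory
>>> chunked = torch.cat([feed_forward(c) for c in hidden_states.split(chunk_size, dim=1)], dim=1)
>>> torch.allclose(chunked, feed_forward(hidden_states), atol=1e-5)
True
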
docs/source/imgs/local_attention_mask.png (new binary file, 27 KiB; not shown)

@ -1,29 +1,35 @@
Transformers
================================================================================================================================================

🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides general-purpose architectures
(BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural Language Generation
(NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between TensorFlow 2.0 and PyTorch.
State-of-the-art Natural Language Processing for Pytorch and TensorFlow 2.0.

🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides general-purpose
architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural
Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between
TensorFlow 2.0 and PyTorch.

This is the documentation of our repository `transformers <https://github.com/huggingface/transformers>`_.

Features
---------------------------------------------------

- As easy to use as pytorch-transformers
- As powerful and concise as Keras
- High performance on NLU and NLG tasks
- Low barrier to entry for educators and practitioners

State-of-the-art NLP for everyone
State-of-the-art NLP for everyone:

- Deep learning researchers
- Hands-on practitioners
- AI/ML/NLP teachers and educators

Lower compute costs, smaller carbon footprint
Lower compute costs, smaller carbon footprint:

- Researchers can share trained models instead of always retraining
- Practitioners can reduce compute time and production costs
- 8 architectures with over 30 pretrained models, some in more than 100 languages

Choose the right framework for every part of a model's lifetime
Choose the right framework for every part of a model's lifetime:

- Train state-of-the-art models in 3 lines of code
- Deep interoperability between TensorFlow 2.0 and PyTorch models
- Move a single model between TF2.0/PyTorch frameworks at will

@ -32,48 +38,143 @@ Choose the right framework for every part of a model's lifetime
Contents
---------------------------------

The library currently contains PyTorch and Tensorflow implementations, pre-trained model weights, usage scripts and conversion utilities for the following models:
The documentation is organized in five parts:

1. `BERT <https://github.com/google-research/bert>`_ (from Google) released with the paper `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
2. `GPT <https://github.com/openai/finetune-transformer-lm>`_ (from OpenAI) released with the paper `Improving Language Understanding by Generative Pre-Training <https://blog.openai.com/language-unsupervised>`_ by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
3. `GPT-2 <https://blog.openai.com/better-language-models>`_ (from OpenAI) released with the paper `Language Models are Unsupervised Multitask Learners <https://blog.openai.com/better-language-models>`_ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
4. `Transformer-XL <https://github.com/kimiyoung/transformer-xl>`_ (from Google/CMU) released with the paper `Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`_ by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
5. `XLNet <https://github.com/zihangdai/xlnet>`_ (from Google/CMU) released with the paper `XLNet: Generalized Autoregressive Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`_ by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
6. `XLM <https://github.com/facebookresearch/XLM>`_ (from Facebook) released together with the paper `Cross-lingual Language Model Pretraining <https://arxiv.org/abs/1901.07291>`_ by Guillaume Lample and Alexis Conneau.
7. `RoBERTa <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`_ (from Facebook), released together with the paper a `Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`_ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
8. `DistilBERT <https://huggingface.co/transformers/model_doc/distilbert.html>`_ (from HuggingFace) released together with the blog post `Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT <https://medium.com/huggingface/distilbert-8cf3380435b5>`_ by Victor Sanh, Lysandre Debut and Thomas Wolf.
- **GET STARTED** contains a quick tour, the installation instructions and some useful information about our philosophy
  and a glossary.
- **USING 🤗 TRANSFORMERS** contains general tutorials on how to use the library.
- **ADVANCED GUIDES** contains more advanced guides that are more specific to a given script or part of the library.
- **RESEARCH** focuses on tutorials that have less to do with how to use the library but more about general research in
  transformer models.
- **PACKAGE REFERENCE** contains the documentation of each public class and function.

The library currently contains PyTorch and Tensorflow implementations, pre-trained model weights, usage scripts and
conversion utilities for the following models:

1. `BERT <https://github.com/google-research/bert>`_ (from Google) released with the paper `BERT: Pre-training of Deep
   Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`_ by Jacob Devlin, Ming-Wei
   Chang, Kenton Lee, and Kristina Toutanova.
2. `GPT <https://github.com/openai/finetune-transformer-lm>`_ (from OpenAI) released with the paper `Improving Language
   Understanding by Generative Pre-Training <https://blog.openai.com/language-unsupervised>`_ by Alec Radford, Karthik
   Narasimhan, Tim Salimans, and Ilya Sutskever.
3. `GPT-2 <https://blog.openai.com/better-language-models>`_ (from OpenAI) released with the paper `Language Models are
   Unsupervised Multitask Learners <https://blog.openai.com/better-language-models>`_ by Alec Radford, Jeffrey Wu,
   Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever.
4. `Transformer-XL <https://github.com/kimiyoung/transformer-xl>`_ (from Google/CMU) released with the paper
   `Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`_ by
   Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, and Ruslan Salakhutdinov.
5. `XLNet <https://github.com/zihangdai/xlnet>`_ (from Google/CMU) released with the paper `XLNet: Generalized
   Autoregressive Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`_ by Zhilin Yang, Zihang
   Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le.
6. `XLM <https://github.com/facebookresearch/XLM>`_ (from Facebook) released together with the paper `Cross-lingual
   Language Model Pretraining <https://arxiv.org/abs/1901.07291>`_ by Guillaume Lample and Alexis Conneau.
7. `RoBERTa <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`_ (from Facebook), released together with
   the paper `Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`_ by Yinhan Liu, Myle
   Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin
   Stoyanov.
8. `DistilBERT <https://huggingface.co/transformers/model_doc/distilbert.html>`_ (from HuggingFace) released together
   with the paper `DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter
   <https://arxiv.org/abs/1910.01108>`_ by Victor Sanh, Lysandre Debut, and Thomas Wolf. The same method has been
   applied to compress GPT2 into
   `DistilGPT2 <https://github.com/huggingface/transformers/tree/master/examples/distillation>`_.
9. `CTRL <https://github.com/pytorch/fairseq/tree/master/examples/ctrl>`_ (from Salesforce), released together with the
   paper `CTRL: A Conditional Transformer Language Model for Controllable Generation
   <https://www.github.com/salesforce/ctrl>`_ by Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, Caiming Xiong,
   and Richard Socher.
10. `CamemBERT <https://huggingface.co/transformers/model_doc/camembert.html>`_ (from FAIR, Inria, Sorbonne Université)
    released together with the paper `CamemBERT: a Tasty French Language Model <https://arxiv.org/abs/1911.03894>`_ by
    Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suarez, Yoann Dupont, Laurent Romary, Eric Villemonte de la
    Clergerie, Djame Seddah, and Benoît Sagot.
11. `ALBERT <https://github.com/google-research/ALBERT>`_ (from Google Research), released together with the paper
    `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations <https://arxiv.org/abs/1909.11942>`_
    by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut.
12. `T5 <https://github.com/google-research/text-to-text-transfer-transformer>`_ (from Google) released with the paper
    `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
    <https://arxiv.org/abs/1910.10683>`_ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
    Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu.
13. `XLM-RoBERTa <https://github.com/pytorch/fairseq/tree/master/examples/xlmr>`_ (from Facebook AI), released together
    with the paper `Unsupervised Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`_ by
    Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard
    Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov.
14. `MMBT <https://github.com/facebookresearch/mmbt/>`_ (from Facebook), released together with the paper `Supervised
    Multimodal Bitransformers for Classifying Images and Text <https://arxiv.org/pdf/1909.02950.pdf>`_ by Douwe Kiela,
    Suvrat Bhooshan, Hamed Firooz, and Davide Testuggine.
15. `FlauBERT <https://github.com/getalp/Flaubert>`_ (from CNRS) released with the paper `FlauBERT: Unsupervised
    Language Model Pre-training for French <https://arxiv.org/abs/1912.05372>`_ by Hang Le, Loïc Vial, Jibril Frej,
    Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, and
    Didier Schwab.
16. `BART <https://github.com/pytorch/fairseq/tree/master/examples/bart>`_ (from Facebook) released with the paper
    `BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension
    <https://arxiv.org/pdf/1910.13461.pdf>`_ by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman
    Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer.
17. `ELECTRA <https://github.com/google-research/electra>`_ (from Google Research/Stanford University) released with
    the paper `ELECTRA: Pre-training text encoders as discriminators rather than generators
    <https://arxiv.org/abs/2003.10555>`_ by Kevin Clark, Minh-Thang Luong, Quoc V. Le, and Christopher D. Manning.
18. `DialoGPT <https://github.com/microsoft/DialoGPT>`_ (from Microsoft Research) released with the paper `DialoGPT:
    Large-Scale Generative Pre-training for Conversational Response Generation <https://arxiv.org/abs/1911.00536>`_ by
    Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu,
    and Bill Dolan.
19. `Reformer <https://github.com/google/trax/tree/master/trax/models/reformer>`_ (from Google Research) released with
    the paper `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451>`_ by Nikita Kitaev, Łukasz
    Kaiser, and Anselm Levskaya.
20. `MarianMT <https://marian-nmt.github.io/>`_ (developed by the Microsoft Translator Team) machine translation models
    trained using `OPUS <http://opus.nlpl.eu/>`_ data by Jörg Tiedemann.
21. `Longformer <https://github.com/allenai/longformer>`_ (from AllenAI) released with the paper `Longformer: The
    Long-Document Transformer <https://arxiv.org/abs/2004.05150>`_ by Iz Beltagy, Matthew E. Peters, and Arman Cohan.
22. `Other community models <https://huggingface.co/models>`_, contributed by the `community
    <https://huggingface.co/users>`_.

.. toctree::
    :maxdepth: 2
    :caption: Notes
    :caption: Get started

    quicktour
    installation
    quickstart
    philosophy
    glossary

.. toctree::
    :maxdepth: 2
    :caption: Using 🤗 Transformers

    task_summary
    model_summary
    preprocessing
    training
    model_sharing
    multilingual

.. toctree::
    :maxdepth: 2
    :caption: Advanced guides

    pretrained_models
    examples
    notebooks
    serialization
    converting_tensorflow_models
    migration
    bertology
    torchscript
    contributing

.. toctree::
    :maxdepth: 2
    :caption: Main classes
    :caption: Research

    main_classes/configuration
    main_classes/model
    main_classes/tokenizer
    main_classes/optimizer_schedules
    main_classes/processors
    bertology
    benchmarks

.. toctree::
    :maxdepth: 2
    :caption: Package Reference

    main_classes/configuration
    main_classes/model
    main_classes/tokenizer
    main_classes/pipelines
    main_classes/optimizer_schedules
    main_classes/processors
    model_doc/auto
    model_doc/encoderdecoder
    model_doc/bert
    model_doc/gpt
    model_doc/transformerxl
@ -82,3 +183,17 @@ The library currently contains PyTorch and Tensorflow implementations, pre-train
    model_doc/xlnet
    model_doc/roberta
    model_doc/distilbert
    model_doc/ctrl
    model_doc/camembert
    model_doc/albert
    model_doc/xlmroberta
    model_doc/flaubert
    model_doc/bart
    model_doc/t5
    model_doc/electra
    model_doc/dialogpt
    model_doc/reformer
    model_doc/marian
    model_doc/longformer
    model_doc/retribert
    model_doc/mobilebert

docs/source/installation.md (new file, 102 lines)
@ -0,0 +1,102 @@
# Installation

🤗 Transformers is tested on Python 3.6+, and PyTorch 1.1.0+ or TensorFlow 2.0+.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're
unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going
to use and activate it.

Now, if you want to use 🤗 Transformers, you can install it with pip. If you'd like to play with the examples, you
must install it from source.

## Installation with pip

First you need to install one of, or both, TensorFlow 2.0 and PyTorch.
Please refer to the [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available)
and/or the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific
install command for your platform.

When TensorFlow 2.0 and/or PyTorch has been installed, 🤗 Transformers can be installed using pip as follows:

```bash
pip install transformers
```

Alternatively, for CPU-support only, you can install 🤗 Transformers and PyTorch in one line with

```bash
pip install transformers[torch]
```

or 🤗 Transformers and TensorFlow 2.0 in one line with

```bash
pip install transformers[tf-cpu]
```

To check 🤗 Transformers is properly installed, run the following command:

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I hate you'))"
```

It should download a pretrained model then print something like

```bash
[{'label': 'NEGATIVE', 'score': 0.9991129040718079}]
```

(Note that TensorFlow will print additional stuff before that last statement.)

## Installing from source

To install from source, clone the repository and install with the following commands:

```bash
git clone https://github.com/huggingface/transformers.git
cd transformers
pip install -e .
```

Again, you can run

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I hate you'))"
```

to check 🤗 Transformers is properly installed.

## Caching models

This library provides pretrained models that will be downloaded and cached locally. Unless you specify a location with
`cache_dir=...` when you use methods like `from_pretrained`, these models will automatically be downloaded into the
folder given by the shell environment variable ``TRANSFORMERS_CACHE``. The default value for it will be the PyTorch
cache home followed by ``/transformers/`` (even if you don't have PyTorch installed). This is (by order of priority):

* shell environment variable ``ENV_TORCH_HOME``
* shell environment variable ``ENV_XDG_CACHE_HOME`` + ``/torch/``
* default: ``~/.cache/torch/``

So if you don't have any specific environment variable set, the cache directory will be at
``~/.cache/torch/transformers/``.
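
For one-off cases you can also point a specific call at a cache location of your choice (a sketch; the path is just an example):

```python
from transformers import BertTokenizer

# downloads (or reuses) the files under the given directory instead of the default cache
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", cache_dir="/path/to/my/cache")
```
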
**Note:** If you have set a shell environment variable for one of the predecessors of this library
(``PYTORCH_TRANSFORMERS_CACHE`` or ``PYTORCH_PRETRAINED_BERT_CACHE``), those will be used if there is no shell
environment variable for ``TRANSFORMERS_CACHE``.

### Note on model downloads (Continuous Integration or large-scale deployments)

If you expect to be downloading large volumes of models (more than 1,000) from our hosted bucket (for instance through
your CI setup, or a large-scale production deployment), please cache the model files on your end. It will be way
faster, and cheaper. Feel free to contact us privately if you need any help.

## Do you want to run a Transformer model on a mobile device?

You should check out our [swift-coreml-transformers](https://github.com/huggingface/swift-coreml-transformers) repo.

It contains a set of tools to convert PyTorch or TensorFlow 2.0 trained Transformer models (currently contains `GPT-2`,
`DistilGPT-2`, `BERT`, and `DistilBERT`) to CoreML models that run on iOS devices.

At some point in the future, you'll be able to seamlessly move from pre-training or fine-tuning models in PyTorch or
TensorFlow 2.0 to productizing them in CoreML, or prototype a model or an app in CoreML then research its
hyperparameters or architecture from PyTorch or TensorFlow 2.0. Super exciting!

@ -1,71 +0,0 @@
Installation
================================================

Transformers is tested on Python 2.7 and 3.5+ (examples are tested only on python 3.5+) and PyTorch 1.1.0

With pip
^^^^^^^^

PyTorch Transformers can be installed using pip as follows:

.. code-block:: bash

  pip install transformers

From source
^^^^^^^^^^^

To install from source, clone the repository and install with:

.. code-block:: bash

  git clone https://github.com/huggingface/transformers.git
  cd transformers
  pip install [--editable] .


Tests
^^^^^

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the `tests folder <https://github.com/huggingface/transformers/tree/master/transformers/tests>`_ and examples tests in the `examples folder <https://github.com/huggingface/transformers/tree/master/examples>`_.

Tests can be run using `pytest` (install pytest if needed with `pip install pytest`).

Run all the tests from the root of the cloned repository with the commands:

.. code-block:: bash

  python -m pytest -sv ./transformers/tests/
  python -m pytest -sv ./examples/


OpenAI GPT original tokenization workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you want to reproduce the original tokenization process of the ``OpenAI GPT`` paper, you will need to install ``ftfy`` (use version 4.4.3 if you are using Python 2) and ``SpaCy``:

.. code-block:: bash

  pip install spacy ftfy==4.4.3
  python -m spacy download en

If you don't install ``ftfy`` and ``SpaCy``\ , the ``OpenAI GPT`` tokenizer will default to tokenize using BERT's ``BasicTokenizer`` followed by Byte-Pair Encoding (which should be fine for most usage, don't worry).


Note on model downloads (Continuous Integration or large-scale deployments)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you expect to be downloading large volumes of models (more than 1,000) from our hosted bucket (for instance through your CI setup, or a large-scale production deployment), please cache the model files on your end. It will be way faster, and cheaper. Feel free to contact us privately if you need any help.


Do you want to run a Transformer model on a mobile device?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You should check out our `swift-coreml-transformers <https://github.com/huggingface/swift-coreml-transformers>`_ repo.

It contains an example of a conversion script from a Pytorch trained Transformer model (here, ``GPT-2``) to a CoreML model that runs on iOS devices.

It also contains an implementation of BERT for Question answering.

At some point in the future, you'll be able to seamlessly move from pre-training or fine-tuning models in PyTorch to productizing them in CoreML,
or prototype a model or an app in CoreML then research its hyperparameters or architecture from PyTorch. Super exciting!

@ -14,8 +14,14 @@ The base class ``PreTrainedModel`` implements the common methods for loading/sav
.. autoclass:: transformers.PreTrainedModel
    :members:

``Helper Functions``
~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: transformers.apply_chunking_to_forward


``TFPreTrainedModel``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: pytorch_transformers.TFPreTrainedModel
.. autoclass:: transformers.TFPreTrainedModel
    :members:

@ -5,6 +5,7 @@ The ``.optimization`` module provides:

- an optimizer with weight decay fixed that can be used to fine-tune models, and
- several schedules in the form of schedule objects that inherit from ``_LRSchedule``:
- a gradient accumulation class to accumulate the gradients of multiple batches

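A minimal usage sketch tying these pieces together (hyperparameter values are arbitrary, and the tiny ``nn.Linear`` stands in for a real transformer model):

::

>>> import torch
>>> from transformers import AdamW, get_linear_schedule_with_warmup

>>> model = torch.nn.Linear(10, 2)  # stand-in for a transformer model
>>> optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01)
>>> scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)

>>> for step in range(3):  # inside a real training loop, after loss.backward()
...     optimizer.step()
...     scheduler.step()
...     optimizer.zero_grad()
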
``AdamW``
~~~~~~~~~~~~~~~~
@ -12,34 +13,36 @@ The ``.optimization`` module provides:
.. autoclass:: transformers.AdamW
    :members:

``AdamWeightDecay``
~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AdamWeightDecay

.. autofunction:: transformers.create_optimizer

Schedules
----------------------------------------------------

Learning Rate Schedules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.ConstantLRSchedule
    :members:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: transformers.get_constant_schedule


.. autoclass:: transformers.WarmupConstantSchedule
    :members:
.. autofunction:: transformers.get_constant_schedule_with_warmup

.. image:: /imgs/warmup_constant_schedule.png
    :target: /imgs/warmup_constant_schedule.png
    :alt:


.. autoclass:: transformers.WarmupCosineSchedule
    :members:
.. autofunction:: transformers.get_cosine_schedule_with_warmup

.. image:: /imgs/warmup_cosine_schedule.png
    :target: /imgs/warmup_cosine_schedule.png
    :alt:


.. autoclass:: transformers.WarmupCosineWithHardRestartsSchedule
    :members:
.. autofunction:: transformers.get_cosine_with_hard_restarts_schedule_with_warmup

.. image:: /imgs/warmup_cosine_hard_restarts_schedule.png
    :target: /imgs/warmup_cosine_hard_restarts_schedule.png
@ -47,9 +50,22 @@ Learning Rate Schedules


.. autoclass:: transformers.WarmupLinearSchedule
    :members:
.. autofunction:: transformers.get_linear_schedule_with_warmup

.. image:: /imgs/warmup_linear_schedule.png
    :target: /imgs/warmup_linear_schedule.png
    :alt:

``Warmup``
~~~~~~~~~~~~~~~~

.. autoclass:: transformers.WarmUp
    :members:

Gradient Strategies
----------------------------------------------------

``GradientAccumulator``
~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GradientAccumulator

docs/source/main_classes/pipelines.rst (new file, 73 lines)
@ -0,0 +1,73 @@
Pipelines
----------------------------------------------------

The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most
of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity
Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering.

There are two categories of pipeline abstractions to be aware of:

- The :func:`~transformers.pipeline` function, which is the most powerful object, encapsulating all other pipelines
- The other task-specific pipelines, such as :class:`~transformers.TokenClassificationPipeline`
  or :class:`~transformers.QuestionAnsweringPipeline`

The pipeline abstraction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The `pipeline` abstraction is a wrapper around all the other available pipelines. It is instantiated like any
other pipeline but requires an additional argument, the `task`.
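
For example, a sentiment-analysis pipeline can be created and used in two lines (a sketch; the default model for the task is downloaded on first use):

::

>>> from transformers import pipeline

>>> classifier = pipeline("sentiment-analysis")
>>> result = classifier("We are very happy to show you the 🤗 Transformers library.")
>>> # ``result`` is a list with one dict containing a 'label' and a 'score'
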
.. autofunction:: transformers.pipeline


The task specific pipelines
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Parent class: Pipeline
=========================================

.. autoclass:: transformers.Pipeline
    :members: predict, transform, save_pretrained

TokenClassificationPipeline
==========================================

.. autoclass:: transformers.TokenClassificationPipeline

NerPipeline
==========================================

This class is an alias of the :class:`~transformers.TokenClassificationPipeline` defined above. Please refer to that
pipeline for documentation and usage examples.

FillMaskPipeline
==========================================

.. autoclass:: transformers.FillMaskPipeline

FeatureExtractionPipeline
==========================================

.. autoclass:: transformers.FeatureExtractionPipeline

TextClassificationPipeline
==========================================

.. autoclass:: transformers.TextClassificationPipeline

QuestionAnsweringPipeline
==========================================

.. autoclass:: transformers.QuestionAnsweringPipeline


SummarizationPipeline
==========================================

.. autoclass:: transformers.SummarizationPipeline


TextGenerationPipeline
==========================================

.. autoclass:: transformers.TextGenerationPipeline

@ -8,20 +8,20 @@ Processors
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
All processors follow the same architecture which is that of the
|
||||
:class:`~pytorch_transformers.data.processors.utils.DataProcessor`. The processor returns a list
|
||||
of :class:`~pytorch_transformers.data.processors.utils.InputExample`. These
|
||||
:class:`~pytorch_transformers.data.processors.utils.InputExample` can be converted to
|
||||
:class:`~pytorch_transformers.data.processors.utils.InputFeatures` in order to be fed to the model.
|
||||
:class:`~transformers.data.processors.utils.DataProcessor`. The processor returns a list
|
||||
of :class:`~transformers.data.processors.utils.InputExample`. These
|
||||
:class:`~transformers.data.processors.utils.InputExample` can be converted to
|
||||
:class:`~transformers.data.processors.utils.InputFeatures` in order to be fed to the model.
|
||||
|
||||
.. autoclass:: pytorch_transformers.data.processors.utils.DataProcessor
|
||||
.. autoclass:: transformers.data.processors.utils.DataProcessor
|
||||
:members:
|
||||
|
||||
|
||||
.. autoclass:: pytorch_transformers.data.processors.utils.InputExample
|
||||
.. autoclass:: transformers.data.processors.utils.InputExample
|
||||
:members:
|
||||
|
||||
|
||||
.. autoclass:: pytorch_transformers.data.processors.utils.InputFeatures
|
||||
.. autoclass:: transformers.data.processors.utils.InputFeatures
|
||||
:members:
|
||||
|
||||
|
||||
@ -36,23 +36,118 @@ This library hosts a total of 10 processors for the following tasks: MRPC, MNLI,
|
||||
CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.
|
||||
|
||||
Those processors are:
|
||||
- :class:`~pytorch_transformers.data.processors.utils.MrpcProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.MnliProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.MnliMismatchedProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.Sst2Processor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.StsbProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.QqpProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.QnliProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.RteProcessor`
|
||||
- :class:`~pytorch_transformers.data.processors.utils.WnliProcessor`
|
||||
- :class:`~transformers.data.processors.utils.MrpcProcessor`
|
||||
- :class:`~transformers.data.processors.utils.MnliProcessor`
|
||||
- :class:`~transformers.data.processors.utils.MnliMismatchedProcessor`
|
||||
- :class:`~transformers.data.processors.utils.Sst2Processor`
|
||||
- :class:`~transformers.data.processors.utils.StsbProcessor`
|
||||
- :class:`~transformers.data.processors.utils.QqpProcessor`
|
||||
- :class:`~transformers.data.processors.utils.QnliProcessor`
|
||||
- :class:`~transformers.data.processors.utils.RteProcessor`
|
||||
- :class:`~transformers.data.processors.utils.WnliProcessor`
|
||||
|
||||
Additionally, the following method can be used to load values from a data file and convert them to a list of
|
||||
:class:`~pytorch_transformers.data.processors.utils.InputExample`.
|
||||
:class:`~transformers.data.processors.utils.InputExample`.
|
||||
|
||||
.. automethod:: pytorch_transformers.data.processors.glue.glue_convert_examples_to_features
|
||||
.. automethod:: transformers.data.processors.glue.glue_convert_examples_to_features
|
||||
|
||||
Example usage
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
An example using these processors is given in the `run_glue.py <https://github.com/huggingface/pytorch-transformers/blob/master/examples/text-classification/run_glue.py>`__ script.
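For reference, here is a rough sketch of running one of these processors by hand and converting the resulting examples to features; ``mrpc_data_dir`` is a placeholder for a local copy of the MRPC data.

Example::

    from transformers import BertTokenizer, glue_convert_examples_to_features
    from transformers.data.processors.glue import MrpcProcessor

    tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

    # Load the MRPC dev examples with the task processor, then turn them into InputFeatures
    processor = MrpcProcessor()
    examples = processor.get_dev_examples(mrpc_data_dir)
    features = glue_convert_examples_to_features(examples, tokenizer, max_length=128, task="mrpc")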
|
||||
|
||||
|
||||
XNLI
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
`The Cross-Lingual NLI Corpus (XNLI) <https://www.nyu.edu/projects/bowman/xnli/>`__ is a benchmark that evaluates
|
||||
the quality of cross-lingual text representations.
|
||||
XNLI is a crowd-sourced dataset based on `MultiNLI <http://www.nyu.edu/projects/bowman/multinli/>`__: pairs of text are labeled with textual entailment
annotations for 15 different languages (including both high-resource languages such as English and low-resource languages such as Swahili).
|
||||
|
||||
It was released together with the paper
|
||||
`XNLI: Evaluating Cross-lingual Sentence Representations <https://arxiv.org/abs/1809.05053>`__
|
||||
|
||||
This library hosts the processor to load the XNLI data:
|
||||
- :class:`~transformers.data.processors.utils.XnliProcessor`
|
||||
|
||||
Please note that since the gold labels are available on the test set, evaluation is performed on the test set.
|
||||
|
||||
An example using these processors is given in the
|
||||
`run_glue.py <https://github.com/huggingface/pytorch-transformers/blob/master/examples/run_glue.py>`__ script.
|
||||
`run_xnli.py <https://github.com/huggingface/pytorch-transformers/blob/master/examples/text-classification/run_xnli.py>`__ script.
|
||||
|
||||
|
||||
SQuAD
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
`The Stanford Question Answering Dataset (SQuAD) <https://rajpurkar.github.io/SQuAD-explorer//>`__ is a benchmark that evaluates
|
||||
the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper
|
||||
`SQuAD: 100,000+ Questions for Machine Comprehension of Text <https://arxiv.org/abs/1606.05250>`__. The second version (v2.0) was released alongside
|
||||
the paper `Know What You Don't Know: Unanswerable Questions for SQuAD <https://arxiv.org/abs/1806.03822>`__.
|
||||
|
||||
This library hosts a processor for each of the two versions:
|
||||
|
||||
Processors
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Those processors are:
|
||||
- :class:`~transformers.data.processors.utils.SquadV1Processor`
|
||||
- :class:`~transformers.data.processors.utils.SquadV2Processor`
|
||||
|
||||
They both inherit from the abstract class :class:`~transformers.data.processors.utils.SquadProcessor`
|
||||
|
||||
.. autoclass:: transformers.data.processors.squad.SquadProcessor
|
||||
:members:
|
||||
|
||||
Additionally, the following method can be used to convert SQuAD examples into :class:`~transformers.data.processors.utils.SquadFeatures`
|
||||
that can be used as model inputs.
|
||||
|
||||
.. automethod:: transformers.data.processors.squad.squad_convert_examples_to_features
|
||||
|
||||
These processors, as well as the aforementioned method, can be used with files containing the data as well as with the `tensorflow_datasets` package.
|
||||
Examples are given below.
|
||||
|
||||
|
||||
Example usage
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Here is an example using the processors as well as the conversion method using data files:
|
||||
|
||||
Example::
|
||||
|
||||
# Loading a V2 processor
|
||||
processor = SquadV2Processor()
|
||||
examples = processor.get_dev_examples(squad_v2_data_dir)
|
||||
|
||||
# Loading a V1 processor
|
||||
processor = SquadV1Processor()
|
||||
examples = processor.get_dev_examples(squad_v1_data_dir)
|
||||
|
||||
features = squad_convert_examples_to_features(
|
||||
examples=examples,
|
||||
tokenizer=tokenizer,
|
||||
max_seq_length=max_seq_length,
|
||||
doc_stride=doc_stride,
|
||||
max_query_length=max_query_length,
|
||||
is_training=not evaluate,
|
||||
)
|
||||
|
||||
Using `tensorflow_datasets` is as easy as using a data file:
|
||||
|
||||
Example::
|
||||
|
||||
# tensorflow_datasets only handles Squad V1.
|
||||
tfds_examples = tfds.load("squad")
|
||||
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
|
||||
|
||||
features = squad_convert_examples_to_features(
|
||||
examples=examples,
|
||||
tokenizer=tokenizer,
|
||||
max_seq_length=max_seq_length,
|
||||
doc_stride=doc_stride,
|
||||
max_query_length=max_query_length,
|
||||
is_training=not evaluate,
|
||||
)
|
||||
|
||||
|
||||
Another example using these processors is given in the
|
||||
`run_squad.py <https://github.com/huggingface/transformers/blob/master/examples/question-answering/run_squad.py>`__ script.
|
||||
|
||||
@ -1,16 +1,40 @@
|
||||
Tokenizer
|
||||
----------------------------------------------------
|
||||
|
||||
The base class ``PreTrainedTokenizer`` implements the common methods for loading/saving a tokenizer either from a local file or directory, or from a pretrained tokenizer provided by the library (downloaded from HuggingFace's AWS S3 repository).
|
||||
A tokenizer is in charge of preparing the inputs for a model. The library comprises tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a "Fast" implementation based on the Rust library `tokenizers`. The "Fast" implementations allow (1) a significant speed-up, in particular when doing batched tokenization, and (2) additional methods to map between the original string (characters and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token). Currently no "Fast" implementation is available for the SentencePiece-based tokenizers (for T5, ALBERT, CamemBERT, XLMRoBERTa and XLNet models).
|
||||
|
||||
``PreTrainedTokenizer`` is the main entry point into tokenizers as it also implements the main methods for using all the tokenizers:
|
||||
The base classes ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` implement the common methods for encoding string inputs into model inputs (see below) and for instantiating/saving python and "Fast" tokenizers, either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace's AWS S3 repository).
|
||||
|
||||
- tokenizing, converting tokens to ids and back and encoding/decoding,
|
||||
``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` thus implement the main methods for using all the tokenizers:
|
||||
|
||||
- tokenizing (splitting strings into sub-word token strings), converting token strings to ids and back, and encoding/decoding (i.e. tokenizing and converting to integers),
- adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece...),
|
||||
- managing special tokens (adding them, assigning them to roles, making sure they are not split during tokenization)
|
||||
- managing special tokens (like the mask and beginning-of-sentence tokens): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization.
|
||||
|
||||
``BatchEncoding`` holds the output of the tokenizer's encoding methods (``__call__``, ``encode_plus`` and ``batch_encode_plus``) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (``input_ids``, ``attention_mask``...). When the tokenizer is a "Fast" tokenizer (i.e. backed by HuggingFace's tokenizers library), this class additionally provides several advanced alignment methods which can be used to map between the original string (characters and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token).
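A minimal sketch, assuming the ``bert-base-uncased`` checkpoint and a "Fast" tokenizer:

Example::

    from transformers import BertTokenizerFast

    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")

    # __call__ returns a BatchEncoding holding the model inputs
    encoding = tokenizer("Hello, how are you?")
    print(encoding["input_ids"])
    print(encoding["attention_mask"])

    # With a "Fast" tokenizer, BatchEncoding also exposes alignment helpers,
    # e.g. mapping a character position back to the index of the token containing it
    print(encoding.char_to_token(7))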
|
||||
|
||||
``PreTrainedTokenizer``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.PreTrainedTokenizer
|
||||
:special-members: __call__
|
||||
:members:
|
||||
|
||||
``PreTrainedTokenizerFast``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.PreTrainedTokenizerFast
|
||||
:special-members: __call__
|
||||
:members:
|
||||
|
||||
``BatchEncoding``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BatchEncoding
|
||||
:members:
|
||||
|
||||
``SpecialTokensMixin``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.SpecialTokensMixin
|
||||
:members:
|
||||
|
||||
@ -1,17 +1,30 @@
|
||||
# Migrating from pytorch-pretrained-bert
|
||||
# Migrating from previous packages
|
||||
|
||||
## Migrating from pytorch-transformers to 🤗 Transformers
|
||||
|
||||
Here is a quick summary of what you should take care of when migrating from `pytorch-pretrained-bert` to `transformers`
|
||||
Here is a quick summary of what you should take care of when migrating from `pytorch-transformers` to 🤗 Transformers.
|
||||
|
||||
### Positional order of some models' keyword inputs (`attention_mask`, `token_type_ids`...) changed
|
||||
|
||||
To be able to use Torchscript (see #1010, #1204 and #1195) the specific order of some models' **keyword inputs** (`attention_mask`, `token_type_ids`...) has been changed.
|
||||
|
||||
If you used to call the models with keyword names for keyword arguments, e.g. `model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)`, this should not cause any change.
|
||||
|
||||
If you used to call the models with positional inputs for keyword arguments, e.g. `model(input_ids, attention_mask, token_type_ids)`, you may have to double-check the exact order of input arguments.
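For instance, a call that passes the optional inputs by keyword keeps working regardless of their positional order:

```python
# Passing the optional inputs by keyword is robust to any change in their positional order
outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
```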
|
||||
|
||||
## Migrating from pytorch-pretrained-bert
|
||||
|
||||
Here is a quick summary of what you should take care of when migrating from `pytorch-pretrained-bert` to 🤗 Transformers
|
||||
|
||||
### Models always output `tuples`
|
||||
|
||||
The main breaking change when migrating from `pytorch-pretrained-bert` to `transformers` is that the models forward method always outputs a `tuple` with various elements depending on the model and the configuration parameters.
|
||||
The main breaking change when migrating from `pytorch-pretrained-bert` to 🤗 Transformers is that the models' forward method always outputs a `tuple` with various elements depending on the model and the configuration parameters.
|
||||
|
||||
The exact content of the tuples for each model is detailed in the models' docstrings and the [documentation](https://huggingface.co/transformers/).
|
||||
|
||||
In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in `pytorch-pretrained-bert`.
|
||||
|
||||
Here is a `pytorch-pretrained-bert` to `transformers` conversion example for a `BertForSequenceClassification` classification model:
|
||||
Here is a `pytorch-pretrained-bert` to 🤗 Transformers conversion example for a `BertForSequenceClassification` classification model:
|
||||
|
||||
```python
|
||||
# Let's load our model
|
||||
@ -20,14 +33,14 @@ model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
|
||||
# If you used to have this line in pytorch-pretrained-bert:
|
||||
loss = model(input_ids, labels=labels)
|
||||
|
||||
# Now just use this line in transformers to extract the loss from the output tuple:
|
||||
# Now just use this line in 🤗 Transformers to extract the loss from the output tuple:
|
||||
outputs = model(input_ids, labels=labels)
|
||||
loss = outputs[0]
|
||||
|
||||
# In transformers you can also have access to the logits:
|
||||
# In 🤗 Transformers you can also have access to the logits:
|
||||
loss, logits = outputs[:2]
|
||||
|
||||
# And even the attention weigths if you configure the model to output them (and other outputs too, see the docstrings and documentation)
|
||||
# And even the attention weights if you configure the model to output them (and other outputs too, see the docstrings and documentation)
|
||||
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
|
||||
outputs = model(input_ids, labels=labels)
|
||||
loss, logits, attentions = outputs
|
||||
@ -84,26 +97,26 @@ Here is a conversion examples from `BertAdam` with a linear warmup and decay sch
|
||||
# Parameters:
|
||||
lr = 1e-3
|
||||
max_grad_norm = 1.0
|
||||
num_total_steps = 1000
|
||||
num_training_steps = 1000
|
||||
num_warmup_steps = 100
|
||||
warmup_proportion = float(num_warmup_steps) / float(num_total_steps) # 0.1
|
||||
warmup_proportion = float(num_warmup_steps) / float(num_training_steps) # 0.1
|
||||
|
||||
### Previously BertAdam optimizer was instantiated like this:
|
||||
optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, t_total=num_total_steps)
|
||||
optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, num_training_steps=num_training_steps)
|
||||
### and used like this:
|
||||
for batch in train_data:
|
||||
loss = model(batch)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
### In Transformers, optimizer and schedules are splitted and instantiated like this:
|
||||
### In 🤗 Transformers, optimizers and schedules are split and instantiated like this:
|
||||
optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False
|
||||
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=num_warmup_steps, t_total=num_total_steps) # PyTorch scheduler
|
||||
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) # PyTorch scheduler
|
||||
### and used like this:
|
||||
for batch in train_data:
|
||||
loss = model(batch)
|
||||
loss.backward()
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) # Gradient clipping is not in AdamW anymore (so you can use amp without issue)
|
||||
scheduler.step()
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
131
docs/source/model_doc/albert.rst
Normal file
@ -0,0 +1,131 @@
|
||||
ALBERT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ALBERT model was proposed in `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations <https://arxiv.org/abs/1909.11942>`_
|
||||
by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. It presents
|
||||
two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT:
|
||||
|
||||
- Splitting the embedding matrix into two smaller matrices
|
||||
- Using repeating layers split among groups
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Increasing model size when pretraining natural language representations often results in improved performance on
|
||||
downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations,
|
||||
longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction
|
||||
techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows
|
||||
that our proposed methods lead to models that scale much better compared to the original BERT. We also use a
|
||||
self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream
|
||||
tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE,
|
||||
RACE, and SQuAD benchmarks while having fewer parameters compared to BERT-large.*
|
||||
|
||||
Tips:
|
||||
|
||||
- ALBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- ALBERT uses repeating layers which results in a small memory footprint, however the computational cost remains
|
||||
similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same
|
||||
number of (repeating) layers.
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/ALBERT>`_.
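A short usage sketch, assuming the ``albert-base-v2`` checkpoint:

Example::

    from transformers import AlbertTokenizer, AlbertModel

    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    model = AlbertModel.from_pretrained("albert-base-v2")

    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    outputs = model(**inputs)
    last_hidden_state = outputs[0]  # models return tuples; the first element is the final hidden states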
|
||||
|
||||
AlbertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertConfig
|
||||
:members:
|
||||
|
||||
|
||||
AlbertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
AlbertModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertModel
|
||||
:members:
|
||||
|
||||
|
||||
AlbertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
AlbertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
AlbertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
AlbertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
AlbertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AlbertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFAlbertModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAlbertModel
|
||||
:members:
|
||||
|
||||
|
||||
TFAlbertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAlbertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFAlbertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAlbertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFAlbertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAlbertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFAlbertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAlbertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFAlbertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAlbertForQuestionAnswering
|
||||
:members:
|
||||
@ -1,11 +1,15 @@
|
||||
AutoModels
|
||||
-----------
|
||||
|
||||
In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you are supplying to the ``from_pretrained`` method.
|
||||
In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you
|
||||
are supplying to the ``from_pretrained`` method.
|
||||
|
||||
AutoClasses are here to do this job for you so that you automatically retreive the relevant model given the name/path to the pretrained weights/config/vocabulary:
|
||||
AutoClasses are here to do this job for you so that you automatically retrieve the relevant model given the name/path
|
||||
to the pretrained weights/config/vocabulary:
|
||||
|
||||
Instantiating one of ``AutoModel``, ``AutoConfig`` and ``AutoTokenizer`` will directly create a class of the relevant architecture (ex: ``model = AutoModel.from_pretrained('bert-base-cased')`` will create a instance of ``BertModel``).
|
||||
Instantiating one of ``AutoModel``, ``AutoConfig`` and ``AutoTokenizer`` will directly create a class of the relevant
|
||||
architecture (ex: ``model = AutoModel.from_pretrained('bert-base-cased')`` will create an instance of
|
||||
:class:`~transformers.BertModel`).
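For instance (a short sketch, assuming the ``bert-base-cased`` checkpoint):

Example::

    from transformers import AutoConfig, AutoModel, AutoTokenizer

    # The relevant architecture is inferred from the checkpoint name
    config = AutoConfig.from_pretrained("bert-base-cased")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = AutoModel.from_pretrained("bert-base-cased")  # an instance of BertModel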
|
||||
|
||||
|
||||
``AutoConfig``
|
||||
@ -15,6 +19,13 @@ Instantiating one of ``AutoModel``, ``AutoConfig`` and ``AutoTokenizer`` will di
|
||||
:members:
|
||||
|
||||
|
||||
``AutoTokenizer``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AutoTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
``AutoModel``
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@ -22,8 +33,77 @@ Instantiating one of ``AutoModel``, ``AutoConfig`` and ``AutoTokenizer`` will di
|
||||
:members:
|
||||
|
||||
|
||||
``AutoTokenizer``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
``AutoModelForPreTraining``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AutoTokenizer
|
||||
.. autoclass:: transformers.AutoModelForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
``AutoModelWithLMHead``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AutoModelWithLMHead
|
||||
:members:
|
||||
|
||||
|
||||
``AutoModelForSequenceClassification``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AutoModelForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
``AutoModelForQuestionAnswering``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AutoModelForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
``AutoModelForTokenClassification``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.AutoModelForTokenClassification
|
||||
:members:
|
||||
|
||||
``TFAutoModel``
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAutoModel
|
||||
:members:
|
||||
|
||||
|
||||
``TFAutoModelForPreTraining``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAutoModelForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
``TFAutoModelWithLMHead``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAutoModelWithLMHead
|
||||
:members:
|
||||
|
||||
|
||||
``TFAutoModelForSequenceClassification``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAutoModelForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
``TFAutoModelForQuestionAnswering``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAutoModelForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
``TFAutoModelForTokenClassification``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFAutoModelForTokenClassification
|
||||
:members:
|
||||
|
||||
71
docs/source/model_doc/bart.rst
Normal file
@ -0,0 +1,71 @@
|
||||
Bart
|
||||
----------------------------------------------------
|
||||
**DISCLAIMER:** If you see something strange,
|
||||
file a `Github Issue <https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__ and assign
|
||||
@sshleifer
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The Bart model was `proposed <https://arxiv.org/abs/1910.13461>`_ by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer on 29 Oct, 2019.
|
||||
According to the abstract,
|
||||
|
||||
- Bart uses a standard seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a left-to-right decoder (like GPT).
|
||||
- The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme, where spans of text are replaced with a single mask token.
|
||||
- BART is particularly effective when fine-tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, and achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 6 ROUGE.
|
||||
|
||||
The Authors' code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/bart>`_
|
||||
|
||||
|
||||
Implementation Notes:
|
||||
|
||||
- Bart doesn't use :obj:`token_type_ids` for sequence classification. Use BartTokenizer.encode to get the proper splitting.
|
||||
- The forward pass of ``BartModel`` will create decoder inputs (using the helper function ``transformers.modeling_bart._prepare_bart_decoder_inputs``) if they are not passed. This is different from some other modeling APIs.
|
||||
- Model predictions are intended to be identical to the original implementation. This only works, however, if the string you pass to ``fairseq.encode`` starts with a space.
|
||||
- ``BartForConditionalGeneration.generate`` should be used for conditional generation tasks like summarization; see the example in its docstring and the sketch after this list.
|
||||
- Models that load the ``"facebook/bart-large-cnn"`` weights will not have a ``mask_token_id``, or be able to perform mask filling tasks.
|
||||
|
||||
BartConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BartConfig
|
||||
:members:
|
||||
|
||||
|
||||
BartTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BartTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
BartModel
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BartModel
|
||||
:members: forward
|
||||
|
||||
.. autofunction:: transformers.modeling_bart._prepare_bart_decoder_inputs
|
||||
|
||||
|
||||
BartForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BartForSequenceClassification
|
||||
:members: forward
|
||||
|
||||
|
||||
BartForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BartForQuestionAnswering
|
||||
:members: forward
|
||||
|
||||
|
||||
BartForConditionalGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BartForConditionalGeneration
|
||||
:members: generate, forward
|
||||
|
||||
|
||||
@ -1,128 +1,172 @@
|
||||
BERT
|
||||
----------------------------------------------------
|
||||
|
||||
``BertConfig``
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The BERT model was proposed in `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`__
|
||||
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
|
||||
pre-trained using a combination of masked language modeling objective and next sentence prediction
|
||||
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations
|
||||
from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional
|
||||
representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result,
|
||||
the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models
|
||||
for a wide range of tasks, such as question answering and language inference, without substantial task-specific
|
||||
architecture modifications.*
|
||||
|
||||
*BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural
|
||||
language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI
|
||||
accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute
|
||||
improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).*
|
||||
|
||||
Tips:
|
||||
|
||||
- BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- BERT was trained with a masked language modeling (MLM) objective. It is therefore efficient at predicting masked
|
||||
tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language
|
||||
modeling (CLM) objective are better in that regard.
|
||||
- Alongside MLM, BERT was trained using a next sentence prediction (NSP) objective, using the [CLS] token as an
approximate summary of the sequence. The user may use this token (the first token in a sequence built with special tokens) to get a sequence
prediction rather than a token prediction. However, averaging over the sequence may yield better results than using
the [CLS] token.
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/bert>`_.
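A short masked-language-modeling sketch, assuming the ``bert-base-uncased`` checkpoint:

Example::

    from transformers import BertTokenizer, BertForMaskedLM

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForMaskedLM.from_pretrained("bert-base-uncased")

    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    outputs = model(**inputs)
    prediction_scores = outputs[0]  # shape (batch_size, sequence_length, vocab_size)

    # Position of the masked token, then the highest-scoring vocabulary id at that position
    mask_index = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0].item()
    predicted_id = prediction_scores[0, mask_index].argmax().item()
    print(tokenizer.decode([predicted_id]))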
|
||||
|
||||
BertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertConfig
|
||||
:members:
|
||||
|
||||
|
||||
``BertTokenizer``
|
||||
BertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
BertTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
``BertModel``
|
||||
BertModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertModel
|
||||
:members:
|
||||
|
||||
|
||||
``BertForPreTraining``
|
||||
BertForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
``BertForMaskedLM``
|
||||
BertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
``BertForNextSentencePrediction``
|
||||
BertForNextSentencePrediction
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForNextSentencePrediction
|
||||
:members:
|
||||
|
||||
|
||||
``BertForSequenceClassification``
|
||||
BertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
``BertForMultipleChoice``
|
||||
BertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
``BertForTokenClassification``
|
||||
BertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
``BertForQuestionAnswering``
|
||||
BertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.BertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertModel``
|
||||
TFBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertModel
|
||||
.. autoclass:: transformers.TFBertModel
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForPreTraining``
|
||||
TFBertForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForPreTraining
|
||||
.. autoclass:: transformers.TFBertForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForMaskedLM``
|
||||
TFBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForMaskedLM
|
||||
.. autoclass:: transformers.TFBertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForNextSentencePrediction``
|
||||
TFBertForNextSentencePrediction
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForNextSentencePrediction
|
||||
.. autoclass:: transformers.TFBertForNextSentencePrediction
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForSequenceClassification``
|
||||
TFBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForSequenceClassification
|
||||
.. autoclass:: transformers.TFBertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForMultipleChoice``
|
||||
TFBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForMultipleChoice
|
||||
.. autoclass:: transformers.TFBertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForTokenClassification``
|
||||
TFBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForTokenClassification
|
||||
.. autoclass:: transformers.TFBertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
``TFBertForQuestionAnswering``
|
||||
TFBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFBertForQuestionAnswering
|
||||
.. autoclass:: transformers.TFBertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
119
docs/source/model_doc/camembert.rst
Normal file
@ -0,0 +1,119 @@
|
||||
CamemBERT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The CamemBERT model was proposed in `CamemBERT: a Tasty French Language Model <https://arxiv.org/abs/1911.03894>`__
|
||||
by Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suárez, Yoann Dupont, Laurent Romary, Éric Villemonte de la
|
||||
Clergerie, Djamé Seddah, and Benoît Sagot. It is based on Facebook's RoBERTa model released in 2019. It is a model
|
||||
trained on 138GB of French text.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Pretrained language models are now ubiquitous in Natural Language Processing. Despite their success,
|
||||
most available models have either been trained on English data or on the concatenation of data in multiple
|
||||
languages. This makes practical use of such models --in all languages except English-- very limited. Aiming
|
||||
to address this issue for French, we release CamemBERT, a French version of the Bi-directional Encoders for
|
||||
Transformers (BERT). We measure the performance of CamemBERT compared to multilingual models in multiple
|
||||
downstream tasks, namely part-of-speech tagging, dependency parsing, named-entity recognition, and natural
|
||||
language inference. CamemBERT improves the state of the art for most of the tasks considered. We release the
|
||||
pretrained model for CamemBERT hoping to foster research and downstream applications for French NLP.*
|
||||
|
||||
Tips:
|
||||
|
||||
- This implementation is the same as RoBERTa. Refer to the `documentation of RoBERTa <./roberta.html>`__ for usage
|
||||
examples as well as the information relative to the inputs and outputs.
|
||||
|
||||
The original code can be found `here <https://camembert-model.fr/>`_.
|
||||
|
||||
CamembertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertConfig
|
||||
:members:
|
||||
|
||||
|
||||
CamembertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
CamembertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertModel
|
||||
:members:
|
||||
|
||||
|
||||
CamembertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
CamembertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
CamembertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
CamembertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
CamembertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CamembertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFCamembertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCamembertModel
|
||||
:members:
|
||||
|
||||
|
||||
TFCamembertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCamembertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFCamembertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCamembertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFCamembertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCamembertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFCamembertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCamembertForQuestionAnswering
|
||||
:members:
|
||||
80
docs/source/model_doc/ctrl.rst
Normal file
@ -0,0 +1,80 @@
|
||||
CTRL
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The CTRL model was proposed in `CTRL: A Conditional Transformer Language Model for Controllable Generation <https://arxiv.org/abs/1909.05858>`_
|
||||
by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
|
||||
corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Large-scale language models show promising text generation capabilities, but users cannot easily control particular
|
||||
aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model,
|
||||
trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were
|
||||
derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning
|
||||
while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of
|
||||
the training data are most likely given a sequence. This provides a potential method for analyzing large amounts
|
||||
of data via model-based source attribution.*
|
||||
|
||||
Tips:
|
||||
|
||||
- CTRL makes use of control codes to generate text: it requires generations to be started by certain words, sentences
|
||||
or links to generate coherent text. Refer to the `original implementation <https://github.com/salesforce/ctrl>`__
|
||||
for more information.
|
||||
- CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- CTRL was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next
|
||||
token in a sequence. Leveraging this feature allows CTRL to generate syntactically coherent text as
|
||||
can be observed in the `run_generation.py` example script.
|
||||
- The PyTorch models can take the `past` as input, which is the previously computed key/value attention pairs. Using
|
||||
this `past` value prevents the model from re-computing pre-computed values in the context of text generation.
|
||||
See `reusing the past in generative models <../quickstart.html#using-the-past>`_ for more information on the usage
|
||||
of this argument.
|
||||
|
||||
The original code can be found `here <https://github.com/salesforce/ctrl>`_.
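A minimal generation sketch, assuming the ``ctrl`` checkpoint and the ``Links`` control code:

Example::

    from transformers import CTRLTokenizer, CTRLLMHeadModel

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl")

    # Start the prompt with a control code (here "Links") to steer the style of the generation
    input_ids = tokenizer.encode("Links Hello, my name is", return_tensors="pt")
    generated = model.generate(input_ids, max_length=30, repetition_penalty=1.2)
    print(tokenizer.decode(generated[0]))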
|
||||
|
||||
|
||||
CTRLConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CTRLConfig
|
||||
:members:
|
||||
|
||||
|
||||
CTRLTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CTRLTokenizer
|
||||
:members: save_vocabulary
|
||||
|
||||
|
||||
CTRLModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CTRLModel
|
||||
:members:
|
||||
|
||||
|
||||
CTRLLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.CTRLLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFCTRLModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCTRLModel
|
||||
:members:
|
||||
|
||||
|
||||
TFCTRLLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFCTRLLMHeadModel
|
||||
:members:
|
||||
|
||||
39
docs/source/model_doc/dialogpt.rst
Normal file
@ -0,0 +1,39 @@
|
||||
DialoGPT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
DialoGPT was proposed in
|
||||
`DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation <https://arxiv.org/abs/1911.00536>`_
|
||||
by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
It's a GPT2 Model trained on 147M conversation-like exchanges extracted from Reddit.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We present a large, tunable neural conversational response generation model, DialoGPT (dialogue generative pre-trained transformer).
|
||||
Trained on 147M conversation-like exchanges extracted from Reddit comment chains over a period spanning from 2005 through 2017, DialoGPT extends the Hugging Face PyTorch transformer to attain a performance close to human both in terms of automatic and human evaluation in single-turn dialogue settings.
|
||||
We show that conversational systems that leverage DialoGPT generate more relevant, contentful and context-consistent responses than strong baseline systems.
|
||||
The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.*
|
||||
|
||||
Tips:
|
||||
|
||||
- DialoGPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- DialoGPT was trained with a causal language modeling (CLM) objective on conversational data and is therefore powerful at response generation in open-domain dialogue systems.
|
||||
- DialoGPT enables the user to create a chat bot in just 10 lines of code as shown on `DialoGPT's model card <https://huggingface.co/microsoft/DialoGPT-medium>`_.
|
||||
|
||||
Training:
|
||||
|
||||
In order to train or fine-tune DialoGPT, one can use causal language modeling training.
|
||||
To cite the official paper:
|
||||
*We follow the OpenAI GPT-2 to model a multiturn dialogue session
|
||||
as a long text and frame the generation task as language modeling. We first
|
||||
concatenate all dialog turns within a dialogue session into a long text
|
||||
x_1,..., x_N (N is the sequence length), ended by the end-of-text token.*
|
||||
For more information, please refer to the original paper.
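A rough sketch of building one such training example; the ``microsoft/DialoGPT-medium`` checkpoint and the use of ``GPT2Tokenizer`` are assumptions based on the model card:

Example::

    from transformers import GPT2Tokenizer

    tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")

    turns = [
        "Does money buy happiness?",
        "Depends how much money you spend on it.",
        "What is the best way to buy happiness?",
    ]
    # Concatenate all turns of a session into one long text, each followed by the end-of-text token
    input_text = "".join(turn + tokenizer.eos_token for turn in turns)
    input_ids = tokenizer.encode(input_text, return_tensors="pt")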
|
||||
|
||||
|
||||
DialoGPT's architecture is based on the GPT2 model, so one can refer to GPT2's `docstring <https://huggingface.co/transformers/model_doc/gpt2.html>`_.
|
||||
|
||||
The original code can be found `here <https://github.com/microsoft/DialoGPT>`_.
|
||||
@ -1,70 +1,139 @@
|
||||
DistilBERT
|
||||
----------------------------------------------------
|
||||
|
||||
``DistilBertConfig``
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The DistilBERT model was proposed in the blog post
|
||||
`Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT <https://medium.com/huggingface/distilbert-8cf3380435b5>`__,
|
||||
and the paper `DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter <https://arxiv.org/abs/1910.01108>`__.
|
||||
DistilBERT is a small, fast, cheap and light Transformer model trained by distilling BERT base. It has 40% fewer
parameters than `bert-base-uncased` and runs 60% faster, while preserving over 95% of BERT's performance as measured on
the GLUE language understanding benchmark.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*As Transfer Learning from large-scale pre-trained models becomes more prevalent in Natural Language Processing (NLP),
|
||||
operating these large models in on-the-edge and/or under constrained computational training or inference budgets
|
||||
remains challenging. In this work, we propose a method to pre-train a smaller general-purpose language representation
|
||||
model, called DistilBERT, which can then be fine-tuned with good performances on a wide range of tasks like its larger
|
||||
counterparts. While most prior work investigated the use of distillation for building task-specific models, we
|
||||
leverage knowledge distillation during the pre-training phase and show that it is possible to reduce the size of a
|
||||
BERT model by 40%, while retaining 97% of its language understanding capabilities and being 60% faster. To leverage
|
||||
the inductive biases learned by larger models during pre-training, we introduce a triple loss combining language
|
||||
modeling, distillation and cosine-distance losses. Our smaller, faster and lighter model is cheaper to pre-train
|
||||
and we demonstrate its capabilities for on-device computations in a proof-of-concept experiment and a comparative
|
||||
on-device study.*
|
||||
|
||||
Tips:
|
||||
|
||||
- DistilBert doesn't have `token_type_ids`: you don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or `[SEP]`).
|
||||
- DistilBert doesn't have options to select the input positions (`position_ids` input). This could be added if necessary though; just let us know if you need this option.
|
||||
|
||||
The original code can be found `here <https://github.com/huggingface/transformers/tree/master/examples/distillation>`_.
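A short usage sketch, assuming the ``distilbert-base-uncased`` checkpoint:

Example::

    from transformers import DistilBertTokenizer, DistilBertModel

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistilBertModel.from_pretrained("distilbert-base-uncased")

    # No token_type_ids: the two segments are simply separated with the [SEP] token
    text = "Is this jacksonville ?" + tokenizer.sep_token + "No it is not ."
    inputs = tokenizer(text, return_tensors="pt")
    outputs = model(**inputs)
    last_hidden_state = outputs[0]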
|
||||
|
||||
|
||||
DistilBertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertConfig
|
||||
:members:
|
||||
|
||||
|
||||
``DistilBertTokenizer``
|
||||
DistilBertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
``DistilBertModel``
|
||||
DistilBertTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
DistilBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertModel
|
||||
:members:
|
||||
|
||||
|
||||
``DistilBertForMaskedLM``
|
||||
DistilBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
``DistilBertForSequenceClassification``
|
||||
DistilBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
``DistilBertForQuestionAnswering``
|
||||
DistilBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
DistilBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
DistilBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.DistilBertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
``TFDistilBertModel``
|
||||
TFDistilBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFDistilBertModel
|
||||
.. autoclass:: transformers.TFDistilBertModel
|
||||
:members:
|
||||
|
||||
|
||||
``TFDistilBertForMaskedLM``
|
||||
TFDistilBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFDistilBertForMaskedLM
|
||||
.. autoclass:: transformers.TFDistilBertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
``TFDistilBertForSequenceClassification``
|
||||
TFDistilBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFDistilBertForSequenceClassification
|
||||
.. autoclass:: transformers.TFDistilBertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
``TFDistilBertForQuestionAnswering``
|
||||
|
||||
TFDistilBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFDistilBertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
|
||||
TFDistilBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFDistilBertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFDistilBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: pytorch_transformers.TFDistilBertForQuestionAnswering
|
||||
.. autoclass:: transformers.TFDistilBertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
148
docs/source/model_doc/electra.rst
Normal file
@ -0,0 +1,148 @@
|
||||
ELECTRA
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ELECTRA model was proposed in the paper
`ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators <https://openreview.net/pdf?id=r1xMH1BtvB>`__.
|
||||
ELECTRA is a new pre-training approach which trains two transformer models: the generator and the discriminator. The
|
||||
generator's role is to replace tokens in a sequence, and is therefore trained as a masked language model. The discriminator,
|
||||
which is the model we're interested in, tries to identify which tokens were replaced by the generator in the sequence.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Masked language modeling (MLM) pre-training methods such as BERT corrupt
|
||||
the input by replacing some tokens with [MASK] and then train a model to
|
||||
reconstruct the original tokens. While they produce good results when transferred
|
||||
to downstream NLP tasks, they generally require large amounts of compute to be
|
||||
effective. As an alternative, we propose a more sample-efficient pre-training task
|
||||
called replaced token detection. Instead of masking the input, our approach
|
||||
corrupts it by replacing some tokens with plausible alternatives sampled from a small
|
||||
generator network. Then, instead of training a model that predicts the original
|
||||
identities of the corrupted tokens, we train a discriminative model that predicts
|
||||
whether each token in the corrupted input was replaced by a generator sample
|
||||
or not. Thorough experiments demonstrate this new pre-training task is more
|
||||
efficient than MLM because the task is defined over all input tokens rather than
|
||||
just the small subset that was masked out. As a result, the contextual representations
|
||||
learned by our approach substantially outperform the ones learned by BERT
|
||||
given the same model size, data, and compute. The gains are particularly strong
|
||||
for small models; for example, we train a model on one GPU for 4 days that
|
||||
outperforms GPT (trained using 30x more compute) on the GLUE natural language
|
||||
understanding benchmark. Our approach also works well at scale, where it
|
||||
performs comparably to RoBERTa and XLNet while using less than 1/4 of their
|
||||
compute and outperforms them when using the same amount of compute.*
|
||||
|
||||
Tips:
|
||||
|
||||
- ELECTRA is the pre-training approach, therefore there are nearly no changes made to the underlying model: BERT. The
|
||||
only change is the separation of the embedding size and the hidden size -> The embedding size is generally smaller,
|
||||
while the hidden size is larger. An additional projection layer (linear) is used to project the embeddings from
|
||||
their embedding size to the hidden size. In the case where the embedding size is the same as the hidden size, no
|
||||
projection layer is used.
|
||||
- The ELECTRA checkpoints saved using `Google Research's implementation <https://github.com/google-research/electra>`__
|
||||
contain both the generator and discriminator. The conversion script requires the user to name which model to export
|
||||
into the correct architecture. Once converted to the HuggingFace format, these checkpoints may be loaded into all
|
||||
available ELECTRA models, however. This means that the discriminator may be loaded in the `ElectraForMaskedLM` model,
|
||||
and the generator may be loaded in the `ElectraForPreTraining` model (the classification head will be randomly
|
||||
initialized as it doesn't exist in the generator).
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/electra>`_.
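Building on the tips above, here is a minimal sketch of loading the converted checkpoints into their natural head classes (the ``google/electra-small-discriminator`` and ``google/electra-small-generator`` checkpoint names are assumed to be available on the model hub):

.. code-block:: python

    from transformers import ElectraTokenizer, ElectraForPreTraining, ElectraForMaskedLM

    # The discriminator predicts, for every token, whether it was replaced by the generator.
    discriminator = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

    # The generator is a small masked language model that produces the replacement tokens.
    generator = ElectraForMaskedLM.from_pretrained("google/electra-small-generator")

    # Both checkpoints share the same WordPiece vocabulary.
    tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")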
|
||||
|
||||
|
||||
ElectraConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraConfig
|
||||
:members:
|
||||
|
||||
|
||||
ElectraTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
ElectraTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
ElectraModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraModel
|
||||
:members:
|
||||
|
||||
|
||||
ElectraForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
ElectraForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
ElectraForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
ElectraForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
ElectraForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ElectraForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFElectraModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFElectraModel
|
||||
:members:
|
||||
|
||||
|
||||
TFElectraForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFElectraForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
TFElectraForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFElectraForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFElectraForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFElectraForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFElectraForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFElectraForQuestionAnswering
|
||||
:members:
|
||||
23
docs/source/model_doc/encoderdecoder.rst
Normal file
23
docs/source/model_doc/encoderdecoder.rst
Normal file
@ -0,0 +1,23 @@
|
||||
Encoder Decoder Models
|
||||
------------------------
|
||||
|
||||
This class can wrap an encoder model, such as ``BertModel``, and a decoder model with a language modeling head, such as ``BertForMaskedLM``, into an encoder-decoder model.

The ``EncoderDecoderModel`` class allows instantiating an encoder-decoder model with the ``from_encoder_decoder_pretrained`` class method, which takes a pretrained encoder and a pretrained decoder model as input.
The ``EncoderDecoderModel`` is saved using the standard ``save_pretrained()`` method and can also be loaded again using the standard ``from_pretrained()`` method.
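A minimal sketch of building and saving such a model, assuming ``bert-base-uncased`` is used for both the encoder and the decoder:

.. code-block:: python

    from transformers import EncoderDecoderModel

    # Build an encoder-decoder model from two pretrained BERT checkpoints.
    model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")

    # The combined model is saved and reloaded like any other model.
    model.save_pretrained("my-bert2bert")
    model = EncoderDecoderModel.from_pretrained("my-bert2bert")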
|
||||
|
||||
An application of this architecture could be *summarization* using two pretrained BERT models, as shown in the paper `Text Summarization with Pretrained Encoders <https://arxiv.org/abs/1908.08345>`_ by Yang Liu and Mirella Lapata.
|
||||
|
||||
|
||||
``EncoderDecoderConfig``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.EncoderDecoderConfig
|
||||
:members:
|
||||
|
||||
|
||||
``EncoderDecoderModel``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.EncoderDecoderModel
|
||||
:members:
|
||||
117
docs/source/model_doc/flaubert.rst
Normal file
117
docs/source/model_doc/flaubert.rst
Normal file
@ -0,0 +1,117 @@
|
||||
FlauBERT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The FlauBERT model was proposed in the paper
|
||||
`FlauBERT: Unsupervised Language Model Pre-training for French <https://arxiv.org/abs/1912.05372>`__ by Hang Le et al.
|
||||
It's a transformer pre-trained using a masked language modeling (MLM) objective (BERT-like).
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Language models have become a key step to achieve state-of-the art results in many different Natural Language
|
||||
Processing (NLP) tasks. Leveraging the huge amount of unlabeled texts nowadays available, they provide an efficient
|
||||
way to pre-train continuous word representations that can be fine-tuned for a downstream task, along with their
|
||||
contextualization at the sentence level. This has been widely demonstrated for English using contextualized
|
||||
representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018; Radford et al., 2018; Devlin et
|
||||
al., 2019; Yang et al., 2019b). In this paper, we introduce and share FlauBERT, a model learned on a very large
|
||||
and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre
|
||||
for Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text
|
||||
classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most
|
||||
of the time they outperform other pre-training approaches. Different versions of FlauBERT as well as a unified
|
||||
evaluation protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared
|
||||
to the research community for further reproducible experiments in French NLP.*
|
||||
|
||||
The original code can be found `here <https://github.com/getalp/Flaubert>`_.
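As a quick, minimal sketch of extracting contextual representations (the ``flaubert/flaubert_base_cased`` checkpoint name is an assumption; check the model hub for the exact identifiers):

.. code-block:: python

    import torch
    from transformers import FlaubertTokenizer, FlaubertModel

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
    model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")

    # Encode a French sentence and retrieve its contextual hidden states.
    input_ids = tokenizer.encode("Le chat mange une pomme.", return_tensors="pt")
    with torch.no_grad():
        last_hidden_state = model(input_ids)[0]  # (batch_size, sequence_length, hidden_size)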
|
||||
|
||||
|
||||
FlaubertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertConfig
|
||||
:members:
|
||||
|
||||
|
||||
FlaubertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
FlaubertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertModel
|
||||
:members:
|
||||
|
||||
|
||||
FlaubertWithLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertWithLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
FlaubertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
FlaubertForQuestionAnsweringSimple
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertForQuestionAnsweringSimple
|
||||
:members:
|
||||
|
||||
|
||||
FlaubertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.FlaubertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFFlaubertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFFlaubertModel
|
||||
:members:
|
||||
|
||||
|
||||
TFFlaubertWithLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFFlaubertWithLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFFlaubertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFFlaubertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFFlaubertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFFlaubertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFFlaubertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFFlaubertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFFlaubertForQuestionAnsweringSimple
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFFlaubertForQuestionAnsweringSimple
|
||||
:members:
|
||||
@ -1,57 +1,113 @@
|
||||
OpenAI GPT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
OpenAI GPT model was proposed in `Improving Language Understanding by Generative Pre-Training <https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf>`__
by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. It's a causal (unidirectional)
transformer pre-trained using language modeling on a large corpus with long range dependencies, the Toronto Book Corpus.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Natural language understanding comprises a wide range of diverse tasks such
|
||||
as textual entailment, question answering, semantic similarity assessment, and
|
||||
document classification. Although large unlabeled text corpora are abundant,
|
||||
labeled data for learning these specific tasks is scarce, making it challenging for
|
||||
discriminatively trained models to perform adequately. We demonstrate that large
|
||||
gains on these tasks can be realized by generative pre-training of a language model
|
||||
on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each
|
||||
specific task. In contrast to previous approaches, we make use of task-aware input
|
||||
transformations during fine-tuning to achieve effective transfer while requiring
|
||||
minimal changes to the model architecture. We demonstrate the effectiveness of
|
||||
our approach on a wide range of benchmarks for natural language understanding.
|
||||
Our general task-agnostic model outperforms discriminatively trained models that
|
||||
use architectures specifically crafted for each task, significantly improving upon the
|
||||
state of the art in 9 out of the 12 tasks studied.*
|
||||
|
||||
Tips:
|
||||
|
||||
- GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- GPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next
  token in a sequence. Leveraging this feature allows GPT to generate syntactically coherent text, as
  can be observed in the `run_generation.py` example script.
|
||||
|
||||
`Write With Transformer <https://transformer.huggingface.co/doc/gpt>`__ is a webapp created and hosted by
|
||||
Hugging Face showcasing the generative capabilities of several models. GPT is one of them.
|
||||
|
||||
The original code can be found `here <https://github.com/openai/finetune-transformer-lm>`_.
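The generation capability mentioned in the tips can be sketched as follows (a minimal example using the standard ``openai-gpt`` checkpoint; the prompt and sampling parameters are illustrative only):

.. code-block:: python

    from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

    # Generate a continuation of the prompt with the causal language modeling head.
    input_ids = tokenizer.encode("the book is about", return_tensors="pt")
    output_ids = model.generate(input_ids, max_length=30, do_sample=True, top_k=50)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))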
|
||||
|
||||
Note:
|
||||
|
||||
If you want to reproduce the original tokenization process of the `OpenAI GPT` paper, you will need to install
|
||||
``ftfy`` and ``SpaCy``::
|
||||
|
||||
pip install spacy ftfy==4.4.3
|
||||
python -m spacy download en
|
||||
|
||||
If you don't install ``ftfy`` and ``SpaCy``, the :class:`transformers.OpenAIGPTTokenizer` will default to tokenize using
|
||||
BERT's :obj:`BasicTokenizer` followed by Byte-Pair Encoding (which should be fine for most usage, don't
|
||||
worry).
|
||||
|
||||
OpenAIGPTConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.OpenAIGPTConfig
|
||||
:members:
|
||||
|
||||
|
||||
OpenAIGPTTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.OpenAIGPTTokenizer
|
||||
:members: save_vocabulary
|
||||
|
||||
|
||||
OpenAIGPTTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.OpenAIGPTTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
OpenAIGPTModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.OpenAIGPTModel
|
||||
:members:
|
||||
|
||||
|
||||
OpenAIGPTLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.OpenAIGPTLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
OpenAIGPTDoubleHeadsModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.OpenAIGPTDoubleHeadsModel
|
||||
:members:
|
||||
|
||||
|
||||
TFOpenAIGPTModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFOpenAIGPTModel
|
||||
:members:
|
||||
|
||||
|
||||
TFOpenAIGPTLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFOpenAIGPTLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFOpenAIGPTDoubleHeadsModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFOpenAIGPTDoubleHeadsModel
|
||||
:members:
|
||||
|
||||
@ -1,57 +1,100 @@
|
||||
OpenAI GPT2
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
OpenAI GPT-2 model was proposed in
|
||||
`Language Models are Unsupervised Multitask Learners <https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf>`_
|
||||
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
|
||||
corpus of ~40 GB of text data.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*GPT-2 is a large transformer-based language model with 1.5 billion parameters, trained on a dataset[1]
|
||||
of 8 million web pages. GPT-2 is trained with a simple objective: predict the next word, given all of the previous
|
||||
words within some text. The diversity of the dataset causes this simple goal to contain naturally occurring
|
||||
demonstrations of many tasks across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X
|
||||
the parameters and trained on more than 10X the amount of data.*
|
||||
|
||||
Tips:
|
||||
|
||||
- GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- GPT-2 was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next
|
||||
token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as
|
||||
it can be observed in the `run_generation.py` example script.
|
||||
- The PyTorch models can take the `past` as input, which is the previously computed key/value attention pairs. Using
|
||||
this `past` value prevents the model from re-computing pre-computed values in the context of text generation.
|
||||
See `reusing the past in generative models <../quickstart.html#using-the-past>`_ for more information on the usage
|
||||
of this argument.
|
||||
|
||||
`Write With Transformer <https://transformer.huggingface.co/doc/gpt2-large>`__ is a webapp created and hosted by
|
||||
Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five
|
||||
different sizes: small, medium, large, xl and a distilled version of the small checkpoint: distilgpt-2.
|
||||
|
||||
The original code can be found `here <https://openai.com/blog/better-language-models/>`_.
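The `past` mechanism described in the tips can be used as follows (a minimal sketch with the standard ``gpt2`` checkpoint; the prompt is illustrative only):

.. code-block:: python

    import torch
    from transformers import GPT2Tokenizer, GPT2LMHeadModel

    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # First forward pass: compute and cache the key/value pairs of the whole context.
    context = tokenizer.encode("My dog is", return_tensors="pt")
    logits, past = model(context)[:2]

    # Subsequent passes only feed the newly generated token together with the cached `past`.
    next_token = torch.argmax(logits[:, -1, :], dim=-1).unsqueeze(-1)
    logits, past = model(next_token, past=past)[:2]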
|
||||
|
||||
|
||||
GPT2Config
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPT2Config
|
||||
:members:
|
||||
|
||||
|
||||
GPT2Tokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPT2Tokenizer
|
||||
:members: save_vocabulary
|
||||
|
||||
|
||||
GPT2TokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPT2TokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
GPT2Model
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPT2Model
|
||||
:members:
|
||||
|
||||
|
||||
GPT2LMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPT2LMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
GPT2DoubleHeadsModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.GPT2DoubleHeadsModel
|
||||
:members:
|
||||
|
||||
|
||||
TFGPT2Model
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFGPT2Model
|
||||
:members:
|
||||
|
||||
|
||||
TFGPT2LMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFGPT2LMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFGPT2DoubleHeadsModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFGPT2DoubleHeadsModel
|
||||
:members:
|
||||
|
||||
104
docs/source/model_doc/longformer.rst
Normal file
104
docs/source/model_doc/longformer.rst
Normal file
@ -0,0 +1,104 @@
|
||||
Longformer
|
||||
----------------------------------------------------
|
||||
**DISCLAIMER:** This model is still a work in progress, if you see something strange,
|
||||
file a `Github Issue <https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`_
|
||||
|
||||
Overview
|
||||
~~~~~~~~~
|
||||
The Longformer model was presented in `Longformer: The Long-Document Transformer <https://arxiv.org/pdf/2004.05150.pdf>`_ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
Here is the abstract:
|
||||
|
||||
*Transformer-based models are unable to process long sequences due to their self-attention operation, which scales quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on WikiHop and TriviaQA.*
|
||||
|
||||
The Authors' code can be found `here <https://github.com/allenai/longformer>`_ .
|
||||
|
||||
Longformer Self Attention
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Longformer self attention employs self attention on both a "local" context and a "global" context.
Most tokens only attend "locally" to each other, meaning that each token attends to its :math:`\frac{1}{2} w` previous tokens and :math:`\frac{1}{2} w` succeeding tokens, with :math:`w` being the window length as defined in `config.attention_window`. Note that `config.attention_window` can be of type ``list`` to define a different :math:`w` for each layer.
A selected few tokens attend "globally" to all other tokens, as is conventionally done for all tokens in *e.g.* `BertSelfAttention`.
|
||||
|
||||
Note that "locally" and "globally" attending tokens are projected by different query, key and value matrices.
|
||||
Also note that every "locally" attending token not only attends to tokens within its window :math:`w`, but also to all "globally" attending tokens so that global attention is *symmetric*.
|
||||
|
||||
The user can define which tokens attend "locally" and which tokens attend "globally" by setting the tensor `global_attention_mask` at run-time appropriately. `Longformer` employs the following logic for `global_attention_mask`: `0` - the token attends "locally", `1` - token attends "globally". For more information please also refer to :func:`~transformers.LongformerModel.forward` method.
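For instance, a minimal sketch of marking only the first token (e.g. a classification or question token) as globally attending (the ``allenai/longformer-base-4096`` checkpoint name is assumed):

.. code-block:: python

    import torch
    from transformers import LongformerTokenizer, LongformerModel

    tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

    input_ids = tokenizer.encode("This is a long document.", return_tensors="pt")

    # 0 -> token attends "locally", 1 -> token attends "globally"; here only the first token is global.
    global_attention_mask = torch.zeros_like(input_ids)
    global_attention_mask[:, 0] = 1

    outputs = model(input_ids, global_attention_mask=global_attention_mask)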
|
||||
|
||||
Using Longformer self attention, the memory and time complexity of the query-key matmul operation, which usually represents the memory and time bottleneck, can be reduced from :math:`\mathcal{O}(n_s \times n_s)` to :math:`\mathcal{O}(n_s \times w)`, with :math:`n_s` being the sequence length and :math:`w` being the average window size. It is assumed that the number of "globally" attending tokens is insignificant as compared to the number of "locally" attending tokens.
|
||||
|
||||
For more information, please refer to the official `paper <https://arxiv.org/pdf/2004.05150.pdf>`_ .
|
||||
|
||||
|
||||
Training
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
``LongformerForMaskedLM`` is trained in the exact same way as ``RobertaForMaskedLM`` is trained and
should be used as follows:
|
||||
|
||||
::
|
||||
|
||||
input_ids = tokenizer.encode('This is a sentence from [MASK] training data', return_tensors='pt')
|
||||
mlm_labels = tokenizer.encode('This is a sentence from the training data', return_tensors='pt')
|
||||
|
||||
loss = model(input_ids, labels=mlm_labels)[0]
|
||||
|
||||
|
||||
LongformerConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerConfig
|
||||
:members:
|
||||
|
||||
|
||||
LongformerTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
LongformerTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
LongformerModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerModel
|
||||
:members:
|
||||
|
||||
|
||||
LongformerForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
LongformerForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
LongformerForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
LongformerForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
LongformerForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.LongformerForQuestionAnswering
|
||||
:members:
|
||||
111
docs/source/model_doc/marian.rst
Normal file
111
docs/source/model_doc/marian.rst
Normal file
@ -0,0 +1,111 @@
|
||||
MarianMT
|
||||
----------------------------------------------------
|
||||
**DISCLAIMER:** If you see something strange,
|
||||
file a `Github Issue <https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__ and assign
|
||||
@sshleifer. Translations should be similar, but not identical to, output in the test set linked to in each model card.
|
||||
|
||||
Implementation Notes
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
- Each model is about 298 MB on disk, there are 1,000+ models.
|
||||
- The list of supported language pairs can be found `here <https://huggingface.co/Helsinki-NLP>`__.
|
||||
- The 1,000+ models were originally trained by `Jörg Tiedemann <https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann>`__ using the `Marian <https://marian-nmt.github.io/>`_ C++ library, which supports fast training and translation.
|
||||
- All models are transformer encoder-decoders with 6 layers in each component. Each model's performance is documented in a model card.
|
||||
- The 80 opus models that require BPE preprocessing are not supported.
|
||||
- The modeling code is the same as ``BartForConditionalGeneration`` with a few minor modifications:
|
||||
- static (sinusoid) positional embeddings (``MarianConfig.static_position_embeddings=True``)
|
||||
- a new final_logits_bias (``MarianConfig.add_bias_logits=True``)
|
||||
- no layernorm_embedding (``MarianConfig.normalize_embedding=False``)
|
||||
- the model starts generating with pad_token_id (which has 0 token_embedding) as the prefix. (Bart uses <s/>)
|
||||
- Code to bulk convert models can be found in ``convert_marian_to_pytorch.py``
|
||||
|
||||
Naming
|
||||
~~~~~~
|
||||
- All model names use the following format: ``Helsinki-NLP/opus-mt-{src}-{tgt}``
|
||||
- The language codes used to name models are inconsistent. Two-digit codes can usually be found `here <https://developers.google.com/admin-sdk/directory/v1/languages>`_; three-digit codes require googling "language code {code}".
- Codes formatted like ``es_AR`` are usually ``code_{region}``. That one is Spanish documents from Argentina.
|
||||
|
||||
|
||||
Multilingual Models
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
All model names use the following format: ``Helsinki-NLP/opus-mt-{src}-{tgt}``:
|
||||
- if ``src`` is in all caps, the model supports multiple input languages, you can figure out which ones by looking at the model card, or the Group Members `mapping <https://gist.github.com/sshleifer/6d20e7761931b08e73c3219027b97b8a>`_ .
|
||||
- if ``tgt`` is in all caps, the model can output multiple languages, and you should specify a language code by prepending the desired output language to the src_text
|
||||
- You can see a tokenizer's supported language codes in ``tokenizer.supported_language_codes``
|
||||
|
||||
Example of translating english to many romance languages, using language codes:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import MarianMTModel, MarianTokenizer
|
||||
src_text = [
|
||||
'>>fr<< this is a sentence in english that we want to translate to french',
|
||||
'>>pt<< This should go to portuguese',
|
||||
'>>es<< And this to Spanish'
|
||||
]
|
||||
|
||||
model_name = 'Helsinki-NLP/opus-mt-en-ROMANCE'
|
||||
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
||||
print(tokenizer.supported_language_codes)
|
||||
model = MarianMTModel.from_pretrained(model_name)
|
||||
translated = model.generate(**tokenizer.prepare_translation_batch(src_text))
|
||||
tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
|
||||
# ["c'est une phrase en anglais que nous voulons traduire en français",
|
||||
# 'Isto deve ir para o português.',
|
||||
# 'Y esto al español']
|
||||
|
||||
Sometimes, models were trained on collections of languages that do not resolve to a group. In this case, _ is used as a separator for src or tgt, as in ``'Helsinki-NLP/opus-mt-en_el_es_fi-en_el_es_fi'``. These still require language codes.
|
||||
There are many supported regional language codes, like ``>>es_ES<<`` (Spain) and ``>>es_AR<<`` (Argentina), that do not seem to change translations. I have not found these to provide different results than just using ``>>es<<``.
|
||||
|
||||
For Example:
|
||||
- ``Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU``: translates from all NORTH_EU languages (see `mapping <https://gist.github.com/sshleifer/6d20e7761931b08e73c3219027b97b8a>`_) to all NORTH_EU languages. Use a special language code like ``>>de<<`` to specify output language.
|
||||
- ``Helsinki-NLP/opus-mt-ROMANCE-en``: translates from many romance languages to english, no codes needed since there is only 1 tgt language.
|
||||
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
GROUP_MEMBERS = {
|
||||
'ZH': ['cmn', 'cn', 'yue', 'ze_zh', 'zh_cn', 'zh_CN', 'zh_HK', 'zh_tw', 'zh_TW', 'zh_yue', 'zhs', 'zht', 'zh'],
|
||||
'ROMANCE': ['fr', 'fr_BE', 'fr_CA', 'fr_FR', 'wa', 'frp', 'oc', 'ca', 'rm', 'lld', 'fur', 'lij', 'lmo', 'es', 'es_AR', 'es_CL', 'es_CO', 'es_CR', 'es_DO', 'es_EC', 'es_ES', 'es_GT', 'es_HN', 'es_MX', 'es_NI', 'es_PA', 'es_PE', 'es_PR', 'es_SV', 'es_UY', 'es_VE', 'pt', 'pt_br', 'pt_BR', 'pt_PT', 'gl', 'lad', 'an', 'mwl', 'it', 'it_IT', 'co', 'nap', 'scn', 'vec', 'sc', 'ro', 'la'],
|
||||
'NORTH_EU': ['de', 'nl', 'fy', 'af', 'da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'],
|
||||
'SCANDINAVIA': ['da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'],
|
||||
'SAMI': ['se', 'sma', 'smj', 'smn', 'sms'],
|
||||
'NORWAY': ['nb_NO', 'nb', 'nn_NO', 'nn', 'nog', 'no_nb', 'no'],
|
||||
'CELTIC': ['ga', 'cy', 'br', 'gd', 'kw', 'gv']
|
||||
}
|
||||
|
||||
Code to see available pretrained models:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers.hf_api import HfApi
|
||||
model_list = HfApi().model_list()
|
||||
org = "Helsinki-NLP"
|
||||
model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)]
|
||||
suffix = [x.split('/')[1] for x in model_ids]
|
||||
multi_models = [f'{org}/{s}' for s in suffix if s != s.lower()]
|
||||
|
||||
MarianConfig
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
.. autoclass:: transformers.MarianConfig
|
||||
:members:
|
||||
|
||||
|
||||
MarianTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MarianTokenizer
|
||||
:members: prepare_translation_batch
|
||||
|
||||
|
||||
MarianMTModel
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Pytorch version of marian-nmt's transformer.h (c++). Designed for the OPUS-NMT translation checkpoints.
|
||||
Model API is identical to BartForConditionalGeneration.
|
||||
Available models are listed at `Model List <https://huggingface.co/models?search=Helsinki-NLP>`__
|
||||
This class inherits all functionality from ``BartForConditionalGeneration``, see that page for method signatures.
|
||||
|
||||
.. autoclass:: transformers.MarianMTModel
|
||||
:members:
|
||||
169
docs/source/model_doc/mobilebert.rst
Normal file
169
docs/source/model_doc/mobilebert.rst
Normal file
@ -0,0 +1,169 @@
|
||||
MobileBERT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The MobileBERT model was proposed in `MobileBERT: a Compact Task-Agnostic BERT
|
||||
for Resource-Limited Devices <https://arxiv.org/abs/2004.02984>`__
|
||||
by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. It's a bidirectional transformer
|
||||
based on the BERT model, which is compressed and accelerated using several approaches.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Natural Language Processing (NLP) has recently achieved great success by using huge pre-trained models with hundreds
|
||||
of millions of parameters. However, these models suffer from heavy model sizes and high latency such that they cannot
|
||||
be deployed to resource-limited mobile devices. In this paper, we propose MobileBERT for compressing and accelerating
|
||||
the popular BERT model. Like the original BERT, MobileBERT is task-agnostic, that is, it can be generically applied
|
||||
to various downstream NLP tasks via simple fine-tuning. Basically, MobileBERT is a thin version of BERT_LARGE, while
|
||||
equipped with bottleneck structures and a carefully designed balance between self-attentions and feed-forward
|
||||
networks. To train MobileBERT, we first train a specially designed teacher model, an inverted-bottleneck incorporated
|
||||
BERT_LARGE model. Then, we conduct knowledge transfer from this teacher to MobileBERT. Empirical studies show that
|
||||
MobileBERT is 4.3x smaller and 5.5x faster than BERT_BASE while achieving competitive results on well-known
|
||||
benchmarks. On the natural language inference tasks of GLUE, MobileBERT achieves a GLUE score of 77.7
|
||||
(0.6 lower than BERT_BASE), and 62 ms latency on a Pixel 4 phone. On the SQuAD v1.1/v2.0 question answering task,
|
||||
MobileBERT achieves a dev F1 score of 90.0/79.2 (1.5/2.1 higher than BERT_BASE).*
|
||||
|
||||
Tips:
|
||||
|
||||
- MobileBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on
|
||||
the right rather than the left.
|
||||
- MobileBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective.
|
||||
It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for
|
||||
text generation. Models trained with a causal language modeling (CLM) objective are better in that regard.
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/mobilebert>`_.
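A minimal usage sketch (the ``google/mobilebert-uncased`` checkpoint name is an assumption; check the model hub for the exact identifier):

.. code-block:: python

    import torch
    from transformers import MobileBertTokenizer, MobileBertForMaskedLM

    tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
    model = MobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

    input_ids = tokenizer.encode("The capital of France is [MASK].", return_tensors="pt")
    with torch.no_grad():
        logits = model(input_ids)[0]

    # Pick the highest-scoring token for the masked position.
    mask_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
    print(tokenizer.decode([logits[0, mask_index].argmax().item()]))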
|
||||
|
||||
MobileBertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertConfig
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
MobileBertTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertModel
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForNextSentencePrediction
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForNextSentencePrediction
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
MobileBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.MobileBertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertModel
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForPreTraining
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForPreTraining
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForNextSentencePrediction
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForNextSentencePrediction
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFMobileBertForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFMobileBertForQuestionAnswering
|
||||
:members:
|
||||
|
||||
114
docs/source/model_doc/reformer.rst
Normal file
114
docs/source/model_doc/reformer.rst
Normal file
@ -0,0 +1,114 @@
|
||||
Reformer
|
||||
----------------------------------------------------
|
||||
**DISCLAIMER:** This model is still a work in progress, if you see something strange,
|
||||
file a `Github Issue <https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`_
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~
|
||||
The Reformer model was presented in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451.pdf>`_ by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
Here is the abstract:
|
||||
|
||||
*Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O(L^2) to O(Llog(L)), where L is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of N times, where N is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.*
|
||||
|
||||
The Authors' code can be found `here <https://github.com/google/trax/tree/master/trax/models/reformer>`_ .
|
||||
|
||||
Axial Positional Encodings
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Axial Positional Encodings were first implemented in Google's `trax library <https://github.com/google/trax/blob/4d99ad4965bab1deba227539758d59f0df0fef48/trax/layers/research/position_encodings.py#L29>`_ and developed by the authors of this model's paper. In models that process very long input sequences, the conventional position id encodings store an embeddings vector of size :math:`d`, being the ``config.hidden_size``, for every position :math:`i, \ldots, n_s`, with :math:`n_s` being ``config.max_embedding_size``. *E.g.*, having a sequence length of :math:`n_s = 2^{19} \approx 0.5M` and a ``config.hidden_size`` of :math:`d = 2^{10} \approx 1000` would result in a position encoding matrix:
|
||||
|
||||
.. math::
|
||||
X_{i,j}, \text{ with } i \in \left[1,\ldots, d\right] \text{ and } j \in \left[1,\ldots, n_s\right]
|
||||
|
||||
which alone has over 500M parameters to store. Axial positional encodings factorize :math:`X_{i,j}` into two matrices:
|
||||
|
||||
.. math::
|
||||
X^{1}_{i,j}, \text{ with } i \in \left[1,\ldots, d^1\right] \text{ and } j \in \left[1,\ldots, n_s^1\right]
|
||||
|
||||
and
|
||||
|
||||
.. math::
|
||||
X^{2}_{i,j}, \text{ with } i \in \left[1,\ldots, d^2\right] \text{ and } j \in \left[1,\ldots, n_s^2\right]
|
||||
|
||||
with:
|
||||
|
||||
.. math::
|
||||
d = d^1 + d^2 \text{ and } n_s = n_s^1 \times n_s^2 .
|
||||
|
||||
Therefore the following holds:
|
||||
|
||||
.. math::
|
||||
X_{i,j} = \begin{cases}
|
||||
X^{1}_{i, k}, & \text{if }\ i < d^1 \text{ with } k = j \mod n_s^1 \\
|
||||
X^{2}_{i - d^1, l}, & \text{if } i \ge d^1 \text{ with } l = \lfloor\frac{j}{n_s^1}\rfloor
|
||||
\end{cases}
|
||||
|
||||
Intuitively, this means that a position embedding vector :math:`x_j \in \mathbb{R}^{d}` is now the composition of two factorized embedding vectors: :math:`x^1_{k, l} + x^2_{l, k}`, whereby the ``config.max_embedding_size`` dimension :math:`j` is factorized into :math:`k \text{ and } l`.
|
||||
This design ensures that each position embedding vector :math:`x_j` is unique.
|
||||
|
||||
Using the above example again, axial position encoding with :math:`d^1 = 2^5, d^2 = 2^5, n_s^1 = 2^9, n_s^2 = 2^{10}` can drastically reduce the number of parameters to :math:`2^{14} + 2^{15} \approx 49000`.
|
||||
|
||||
In practice, the parameter ``config.axial_pos_embds_dim`` is set to a ``list`` :math:`(d^1, d^2)`, whose sum has to be equal to ``config.hidden_size``, and ``config.axial_pos_shape`` is set to a ``list`` :math:`(n_s^1, n_s^2)`, whose product has to be equal to ``config.max_embedding_size``, which during training has to be equal to the ``sequence length`` of the ``input_ids``.
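For example, a configuration sketch matching the description above (the values are purely illustrative and must be adapted to the actual training sequence length):

.. code-block:: python

    from transformers import ReformerConfig

    # hidden_size = 64 + 192 = 256 and 128 * 512 = 65536 positions.
    config = ReformerConfig(
        hidden_size=256,
        axial_pos_embds_dim=[64, 192],  # (d^1, d^2): must sum to hidden_size
        axial_pos_shape=[128, 512],     # (n_s^1, n_s^2): product must equal the training sequence length
    )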
|
||||
|
||||
|
||||
|
||||
LSH Self Attention
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
In Locality sensitive hashing (LSH) self attention the key and query projection weights are tied. Therefore, the key query embedding vectors are also tied.
|
||||
LSH self attention uses the locality sensitive
|
||||
hashing mechanism proposed in `Practical and Optimal LSH for Angular Distance <https://arxiv.org/abs/1509.02897>`_ to assign each of the tied key query embedding vectors to one of ``config.num_buckets`` possible buckets. The premise is that the more "similar" key query embedding vectors (in terms of *cosine similarity*) are to each other, the more likely they are assigned to the same bucket.
|
||||
The accuracy of the LSH mechanism can be improved by increasing ``config.num_hashes`` or directly the argument ``num_hashes`` of the forward function so that the output of the LSH self attention better approximates the output of the "normal" full self attention.
|
||||
The buckets are then sorted and chunked into query key embedding vector chunks each of length ``config.lsh_chunk_length``. For each chunk, the query embedding vectors attend to its key vectors (which are tied to themselves) and to the key embedding vectors of ``config.lsh_num_chunks_before`` previous neighboring chunks and ``config.lsh_num_chunks_after`` following neighboring chunks.
|
||||
For more information, see the `original Paper <https://arxiv.org/abs/2001.04451>`_ or this great `blog post <https://www.pragmatic.ml/reformer-deep-dive/>`_.
|
||||
|
||||
Note that ``config.num_buckets`` can also be factorized into a ``list``:math:`(n_{\text{buckets}}^1, n_{\text{buckets}}^2)`. This way instead of assigning the query key embedding vectors to one of :math:`(1,\ldots, n_{\text{buckets}})` they are assigned to one of :math:`(1-1,\ldots, n_{\text{buckets}}^1-1, \ldots, 1-n_{\text{buckets}}^2, \ldots, n_{\text{buckets}}^1-n_{\text{buckets}}^2)`. This is crucial for very long sequences to save memory.
|
||||
|
||||
When training a model from scratch, it is recommended to leave ``config.num_buckets=None``, so that depending on the sequence length a good value for ``num_buckets`` is calculated on the fly. This value will then automatically be saved in the config and should be reused for inference.
|
||||
|
||||
Using LSH self attention, the memory and time complexity of the query-key matmul operation can be reduced from :math:`\mathcal{O}(n_s \times n_s)` to :math:`\mathcal{O}(n_s \times \log(n_s))`, which usually represents the memory and time bottleneck in a transformer model, with :math:`n_s` being the sequence length.
|
||||
|
||||
|
||||
Local Self Attention
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
Local self attention is essentially a "normal" self attention layer with
key, query and value projections, but is chunked so that in each chunk of length ``config.local_chunk_length`` the query embedding vectors only attend to the key embedding vectors in their chunk and to the key embedding vectors of ``config.local_num_chunks_before`` previous neighboring chunks and ``config.local_num_chunks_after`` following neighboring chunks.
|
||||
|
||||
Using Local self attention, the memory and time complexity of the query-key matmul operation can be reduced from :math:`\mathcal{O}(n_s \times n_s)` to :math:`\mathcal{O}(n_s \times \log(n_s))`, which usually represents the memory and time bottleneck in a transformer model, with :math:`n_s` being the sequence length.
|
||||
|
||||
|
||||
Training
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
During training, we must ensure that the sequence length is set to a value that can be divided by the least common multiple of ``config.lsh_chunk_length`` and ``config.local_chunk_length`` and that the parameters of the Axial Positional Encodings are correctly set as described above. Reformer is very memory efficient so that the model can easily be trained on sequences as long as 64000 tokens.
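As a small illustration of the divisibility constraint (the chunk lengths below are illustrative, not the defaults of any particular checkpoint):

.. code-block:: python

    from math import gcd

    lsh_chunk_length = 64
    local_chunk_length = 128

    # The training sequence length must be a multiple of the least common multiple of both chunk lengths.
    least_common_multiple = lsh_chunk_length * local_chunk_length // gcd(lsh_chunk_length, local_chunk_length)
    assert 65536 % least_common_multiple == 0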
|
||||
For training, the ``ReformerModelWithLMHead`` should be used as follows:
|
||||
|
||||
::
|
||||
|
||||
input_ids = tokenizer.encode('This is a sentence from the training data', return_tensors='pt')
|
||||
loss = model(input_ids, labels=input_ids)[0]
|
||||
|
||||
|
||||
ReformerConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ReformerConfig
|
||||
:members:
|
||||
|
||||
|
||||
ReformerTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ReformerTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
ReformerModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ReformerModel
|
||||
:members:
|
||||
|
||||
|
||||
ReformerModelWithLMHead
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.ReformerModelWithLMHead
|
||||
:members:
|
||||
39
docs/source/model_doc/retribert.rst
Normal file
39
docs/source/model_doc/retribert.rst
Normal file
@ -0,0 +1,39 @@
|
||||
RetriBERT
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The RetriBERT model was proposed in the blog post
|
||||
`Explain Anything Like I'm Five: A Model for Open Domain Long Form Question Answering <https://yjernite.github.io/lfqa.html>`__.
RetriBERT is a small model that uses either a single or a pair of BERT encoders with a lower-dimension projection for dense semantic indexing of text.
|
||||
|
||||
Code to train and use the model can be found `here <https://github.com/huggingface/transformers/tree/master/examples/distillation>`_.
|
||||
|
||||
|
||||
RetriBertConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RetriBertConfig
|
||||
:members:
|
||||
|
||||
|
||||
RetriBertTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RetriBertTokenizer
|
||||
:members:
|
||||
|
||||
|
||||
RetriBertTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RetriBertTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
RetriBertModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RetriBertModel
|
||||
:members:
|
||||
@ -1,57 +1,140 @@
|
||||
RoBERTa
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The RoBERTa model was proposed in `RoBERTa: A Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`_
|
||||
by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
|
||||
Veselin Stoyanov. It is based on Google's BERT model released in 2018.
|
||||
|
||||
It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
|
||||
objective and training with much larger mini-batches and learning rates.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Language model pretraining has led to significant performance gains but careful comparison between different
|
||||
approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes,
|
||||
and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication
|
||||
study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and
|
||||
training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of
|
||||
every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These
|
||||
results highlight the importance of previously overlooked design choices, and raise questions about the source
|
||||
of recently reported improvements. We release our models and code.*
|
||||
|
||||
Tips:
|
||||
|
||||
- This implementation is the same as :class:`~transformers.BertModel` with a tiny embeddings tweak as well as a
|
||||
setup for Roberta pretrained models.
|
||||
- RoBERTa has the same architecture as BERT, but uses a byte-level BPE as a tokenizer (same as GPT-2) and uses a
|
||||
different pre-training scheme.
|
||||
- RoBERTa doesn't have `token_type_ids`, so you don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or `</s>`).
|
||||
- `Camembert <./camembert.html>`__ is a wrapper around RoBERTa. Refer to this page for usage examples.
|
||||
|
||||
The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`_.
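The tip about segment separation can be sketched as follows (a minimal example with the ``roberta-base`` checkpoint):

.. code-block:: python

    from transformers import RobertaTokenizer, RobertaModel

    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    model = RobertaModel.from_pretrained("roberta-base")

    # RoBERTa has no token_type_ids; two segments are simply joined with the separator token.
    inputs = tokenizer.encode_plus("A first segment", "A second segment", return_tensors="pt")
    outputs = model(**inputs)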
|
||||
|
||||
|
||||
RobertaConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaConfig
|
||||
:members:
|
||||
|
||||
|
||||
RobertaTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaTokenizer
|
||||
:members:
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
RobertaTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaTokenizerFast
|
||||
:members: build_inputs_with_special_tokens
|
||||
|
||||
|
||||
RobertaModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaModel
|
||||
:members:
|
||||
|
||||
|
||||
RobertaForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
RobertaForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
RobertaForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
|
||||
|
||||
RobertaForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
RobertaForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.RobertaForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFRobertaModel
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRobertaModel
|
||||
:members:
|
||||
|
||||
|
||||
TFRobertaForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRobertaForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFRobertaForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRobertaForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFRobertaForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRobertaForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFRobertaForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRobertaForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFRobertaForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFRobertaForQuestionAnswering
|
||||
:members:
|
||||
|
||||
105
docs/source/model_doc/t5.rst
Normal file
105
docs/source/model_doc/t5.rst
Normal file
@ -0,0 +1,105 @@
|
||||
T5
|
||||
----------------------------------------------------
|
||||
**DISCLAIMER:** This model is still a work in progress, if you see something strange,
|
||||
file a `Github Issue <https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`_
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The T5 model was presented in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer <https://arxiv.org/pdf/1910.10683.pdf>`_ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li and Peter J. Liu.
Here is the abstract:
|
||||
|
||||
*Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice.
|
||||
In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format.
|
||||
Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks.
|
||||
By combining the insights from our exploration with scale and our new "Colossal Clean Crawled Corpus", we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more.
|
||||
To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.*
|
||||
|
||||
Tips:
|
||||
|
||||
- T5 is an encoder-decoder model pre-trained on a multi-task mixture of unsupervised
|
||||
and supervised tasks and for which each task is converted into a text-to-text format.
|
||||
T5 works well on a variety of tasks out-of-the-box by prepending a different prefix to the input corresponding to each task, e.g.: for translation: *translate English to German: ..., summarize: ...*.
|
||||
For more information about which prefix to use, it is easiest to look into Appendix D of the `paper <https://arxiv.org/pdf/1910.10683.pdf>`_ .
|
||||
- For sequence to sequence generation, it is recommended to use ``T5ForConditionalGeneration.generate()``. The method takes care of feeding the encoded input via cross-attention layers to the decoder and auto-regressively generates the decoder output.
|
||||
- T5 uses relative scalar embeddings. Encoder input padding can be done on the left and on the right.
|
||||
|
||||
The original code can be found `here <https://github.com/google-research/text-to-text-transfer-transformer>`_.
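The text-to-text usage with ``generate()`` mentioned in the tips can be sketched as follows (a minimal example with the ``t5-small`` checkpoint):

.. code-block:: python

    from transformers import T5Tokenizer, T5ForConditionalGeneration

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    model = T5ForConditionalGeneration.from_pretrained("t5-small")

    # The task is selected through the prefix prepended to the input text.
    input_ids = tokenizer.encode("translate English to German: The house is wonderful.", return_tensors="pt")
    output_ids = model.generate(input_ids)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))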
|
||||
|
||||
Training
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
T5 is an encoder-decoder model and converts all NLP problems into a text-to-text format. It is trained using teacher forcing.
|
||||
This means that for training we always need an input sequence and a target sequence.
|
||||
The input sequence is fed to the model using ``input_ids``. The target sequence is shifted to the right, *i.e.* prepended by a start-sequence token, and fed to the decoder using ``decoder_input_ids``. In teacher-forcing style, the target sequence is then appended with the EOS token and corresponds to the ``labels``. The PAD token is hereby used as the start-sequence token.
|
||||
T5 can be trained / fine-tuned both in a supervised and unsupervised fashion.
|
||||
|
||||
- Unsupervised denoising training
|
||||
|
||||
In this setup spans of the input sequence are masked by so-called sentinel tokens (*a.k.a* unique mask tokens)
|
||||
and the output sequence is formed as a concatenation of the same sentinel tokens and the *real* masked tokens.
|
||||
Each sentinel token represents a unique mask token for this sentence and should start with ``<extra_id_1>``, ``<extra_id_2>``, ... up to ``<extra_id_100>``. As a default 100 sentinel tokens are available in ``T5Tokenizer``.
|
||||
*E.g.* the sentence "The cute dog walks in the park" with the masks put on "cute dog" and "the" should be processed as follows:
|
||||
|
||||
::
|
||||
|
||||
input_ids = tokenizer.encode('The <extra_id_1> walks in <extra_id_2> park', return_tensors='pt')
|
||||
labels = tokenizer.encode('<extra_id_1> cute dog <extra_id_2> the <extra_id_3> </s>', return_tensors='pt')
|
||||
# the forward function automatically creates the correct decoder_input_ids
|
||||
model(input_ids=input_ids, labels=labels)
|
||||
|
||||
- Supervised training
|
||||
|
||||
In this setup, the input sequence and the output sequence form a standard sequence-to-sequence input-output mapping.
In translation, *e.g.*, the input sequence "The house is wonderful." and the output sequence "Das Haus ist wunderbar." should
be processed as follows:
|
||||
|
||||
::
|
||||
|
||||
input_ids = tokenizer.encode('translate English to German: The house is wonderful. </s>', return_tensors='pt')
|
||||
labels = tokenizer.encode('Das Haus ist wunderbar. </s>', return_tensors='pt')
|
||||
# the forward function automatically creates the correct decoder_input_ids
|
||||
model(input_ids=input_ids, labels=labels)
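For reference, the right shift that the forward pass applies internally to build ``decoder_input_ids`` from ``labels`` can be sketched roughly as follows (a simplified illustration rather than the library's actual implementation; it assumes the PAD token id, ``0`` for T5, is used as the start-sequence token, and the token ids are made up for the example):

::

    def shift_right(labels, pad_token_id=0):
        # prepend the start token (PAD for T5) and drop the last position
        return [pad_token_id] + labels[:-1]

    labels = [644, 4598, 229, 19250, 5, 1]    # "Das Haus ist wunderbar. </s>" (illustrative ids)
    decoder_input_ids = shift_right(labels)   # [0, 644, 4598, 229, 19250, 5]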
|
||||
|
||||
|
||||
T5Config
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.T5Config
|
||||
:members:
|
||||
|
||||
|
||||
T5Tokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.T5Tokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
T5Model
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.T5Model
|
||||
:members:
|
||||
|
||||
|
||||
T5ForConditionalGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.T5ForConditionalGeneration
|
||||
:members:
|
||||
|
||||
|
||||
TFT5Model
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFT5Model
|
||||
:members:
|
||||
|
||||
|
||||
TFT5ForConditionalGeneration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFT5ForConditionalGeneration
|
||||
:members:
|
||||
@ -1,44 +1,82 @@
|
||||
Transformer XL
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The Transformer-XL model was proposed in
|
||||
`Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`__
|
||||
by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
It's a causal (uni-directional) transformer with relative positioning (sinusoïdal) embeddings which can reuse
|
||||
previously computed hidden-states to attend to longer context (memory).
|
||||
This model also uses adaptive softmax inputs and outputs (tied).
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the
|
||||
setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency
|
||||
beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and
|
||||
a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves
|
||||
the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and
|
||||
450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up
|
||||
to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results
|
||||
of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on
|
||||
Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably
|
||||
coherent, novel text articles with thousands of tokens.*
|
||||
|
||||
Tips:
|
||||
|
||||
- Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right.
|
||||
The original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left.
|
||||
- Transformer-XL is one of the few models that has no sequence length limit.
|
||||
|
||||
The original code can be found `here <https://github.com/kimiyoung/transformer-xl>`_.
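A minimal sketch of reusing the memory across segments with the ``transfo-xl-wt103`` checkpoint (the exact tuple returned by the model may vary slightly between versions, so treat this as illustrative):

::

    import torch
    from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer

    tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
    model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')

    segments = ["The quick brown fox", "jumps over the lazy dog"]
    mems = None  # no memory for the first segment
    for text in segments:
        input_ids = torch.tensor([tokenizer.encode(text)])
        # the returned mems carry the hidden states over to the next segment
        outputs = model(input_ids, mems=mems)
        prediction_scores, mems = outputs[:2]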
|
||||
|
||||
|
||||
TransfoXLConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TransfoXLConfig
|
||||
:members:
|
||||
|
||||
|
||||
TransfoXLTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TransfoXLTokenizer
|
||||
:members: save_vocabulary
|
||||
|
||||
|
||||
TransfoXLTokenizerFast
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TransfoXLTokenizerFast
|
||||
:members:
|
||||
|
||||
|
||||
TransfoXLModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TransfoXLModel
|
||||
:members:
|
||||
|
||||
|
||||
TransfoXLLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TransfoXLLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFTransfoXLModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFTransfoXLModel
|
||||
:members:
|
||||
|
||||
|
||||
TFTransfoXLLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFTransfoXLLMHeadModel
|
||||
:members:
|
||||
|
||||
@ -1,69 +1,124 @@
|
||||
XLM
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The XLM model was proposed in `Cross-lingual Language Model Pretraining <https://arxiv.org/abs/1901.07291>`_
|
||||
by Guillaume Lample*, Alexis Conneau*. It's a transformer pre-trained using one of the following objectives:
|
||||
|
||||
- a causal language modeling (CLM) objective (next token prediction),
|
||||
- a masked language modeling (MLM) objective (BERT-like), or
- a translation language modeling (TLM) objective (an extension of BERT's MLM to multiple language inputs)
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Recent studies have demonstrated the efficiency of generative pretraining for English natural language understanding.
|
||||
In this work, we extend this approach to multiple languages and show the effectiveness of cross-lingual pretraining.
|
||||
We propose two methods to learn cross-lingual language models (XLMs): one unsupervised that only relies on monolingual
|
||||
data, and one supervised that leverages parallel data with a new cross-lingual language model objective. We obtain
|
||||
state-of-the-art results on cross-lingual classification, unsupervised and supervised machine translation. On XNLI,
|
||||
our approach pushes the state of the art by an absolute gain of 4.9% accuracy. On unsupervised machine translation,
|
||||
we obtain 34.3 BLEU on WMT'16 German-English, improving the previous state of the art by more than 9 BLEU. On
|
||||
supervised machine translation, we obtain a new state of the art of 38.5 BLEU on WMT'16 Romanian-English, outperforming
|
||||
the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.*
|
||||
|
||||
Tips:
|
||||
|
||||
- XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to
|
||||
select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation).
|
||||
- XLM has multilingual checkpoints which leverage a specific `lang` parameter. Check out the
|
||||
`multi-lingual <../multilingual.html>`__ page for more information.
|
||||
|
||||
The original code can be found `here <https://github.com/facebookresearch/XLM/>`_.
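For the multilingual checkpoints that use language embeddings, the language id is passed through the ``langs`` argument; a minimal sketch with the ``xlm-mlm-xnli15-1024`` checkpoint (one possible choice, see the multi-lingual page linked above for details):

::

    import torch
    from transformers import XLMTokenizer, XLMWithLMHeadModel

    tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-xnli15-1024')
    model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-xnli15-1024')

    input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")])
    # one language id per token, taken from the tokenizer's lang2id mapping
    langs = torch.full_like(input_ids, tokenizer.lang2id['en'])

    outputs = model(input_ids, langs=langs)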
|
||||
|
||||
|
||||
XLMConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMConfig
|
||||
:members:
|
||||
|
||||
XLMTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
XLMModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMModel
|
||||
:members:
|
||||
|
||||
|
||||
XLMWithLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMWithLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
XLMForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
XLMForQuestionAnsweringSimple
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMForQuestionAnsweringSimple
|
||||
:members:
|
||||
|
||||
|
||||
XLMForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMModel
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMWithLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMWithLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
|
||||
TFXLMForQuestionAnsweringSimple
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMForQuestionAnsweringSimple
|
||||
:members:
|
||||
|
||||
133  docs/source/model_doc/xlmroberta.rst  Normal file
@ -0,0 +1,133 @@
|
||||
XLM-RoBERTa
|
||||
------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The XLM-RoBERTa model was proposed in `Unsupervised Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`__
|
||||
by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán,
|
||||
Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook's RoBERTa model released in 2019.
|
||||
It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This paper shows that pretraining multilingual language models at scale leads to significant performance gains for
|
||||
a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred
|
||||
languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly
|
||||
outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +13.8% average accuracy
|
||||
on XNLI, +12.3% average F1 score on MLQA, and +2.1% average F1 score on NER. XLM-R performs particularly well on
|
||||
low-resource languages, improving 11.8% in XNLI accuracy for Swahili and 9.2% for Urdu over the previous XLM model.
|
||||
We also present a detailed empirical evaluation of the key factors that are required to achieve these gains,
|
||||
including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and
|
||||
low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling
|
||||
without sacrificing per-language performance; XLM-R is very competitive with strong monolingual models on the GLUE
|
||||
and XNLI benchmarks. We will make XLM-R code, data, and models publicly available.*
|
||||
|
||||
Tips:
|
||||
|
||||
- XLM-R is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does
|
||||
not require `lang` tensors to understand which language is used, and should be able to determine the correct
|
||||
language from the input ids.
|
||||
- This implementation is the same as RoBERTa. Refer to the `documentation of RoBERTa <./roberta.html>`__ for usage
|
||||
examples as well as the information relative to the inputs and outputs.
|
||||
|
||||
The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/xlmr>`_.
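Since the implementation is the same as RoBERTa, usage follows the usual masked language modeling pattern; a minimal sketch with the ``xlm-roberta-base`` checkpoint (illustrative only, no ``lang`` tensor is needed):

::

    import torch
    from transformers import XLMRobertaForMaskedLM, XLMRobertaTokenizer

    tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    model = XLMRobertaForMaskedLM.from_pretrained('xlm-roberta-base')

    # the model figures out the input language from the ids alone
    input_ids = torch.tensor([tokenizer.encode("Bonjour, je suis un modèle <mask>.")])
    outputs = model(input_ids)
    prediction_scores = outputs[0]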
|
||||
|
||||
|
||||
XLMRobertaConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaConfig
|
||||
:members:
|
||||
|
||||
|
||||
XLMRobertaTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
XLMRobertaModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaModel
|
||||
:members:
|
||||
|
||||
|
||||
XLMRobertaForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
XLMRobertaForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
XLMRobertaForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
XLMRobertaForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
XLMRobertaForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLMRobertaForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMRobertaModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMRobertaModel
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMRobertaForMaskedLM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMRobertaForMaskedLM
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMRobertaForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMRobertaForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMRobertaForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMRobertaForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMRobertaForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMRobertaForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFXLMRobertaForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLMRobertaForQuestionAnswering
|
||||
:members:
|
||||
@ -1,71 +1,141 @@
|
||||
XLNet
|
||||
----------------------------------------------------
|
||||
|
||||
Overview
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The XLNet model was proposed in `XLNet: Generalized Autoregressive Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`_
|
||||
by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
|
||||
XLNet is an extension of the Transformer-XL model pre-trained using an autoregressive method
|
||||
to learn bidirectional contexts by maximizing the expected likelihood over all permutations
|
||||
of the input sequence factorization order.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves
|
||||
better performance than pretraining approaches based on autoregressive language modeling. However, relying on
|
||||
corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a
|
||||
pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive
|
||||
pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over
|
||||
all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive
|
||||
formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model,
|
||||
into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by
|
||||
a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.*
|
||||
|
||||
Tips:
|
||||
|
||||
- The specific attention pattern can be controlled at training and test time using the `perm_mask` input.
|
||||
- Due to the difficulty of training a fully auto-regressive model over various factorization orders,
XLNet is pretrained using only a sub-set of the output tokens as targets, which are selected
|
||||
with the `target_mapping` input.
|
||||
- To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and
|
||||
`target_mapping` inputs to control the attention span and outputs (see examples in `examples/text-generation/run_generation.py`)
|
||||
- XLNet is one of the few models that has no sequence length limit.
|
||||
|
||||
The original code can be found `here <https://github.com/zihangdai/xlnet/>`_.
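A minimal sketch of the `perm_mask`/`target_mapping` mechanism mentioned in the tips, here masking the last token and asking the model to predict it (shapes follow the documented conventions, but treat the details as illustrative):

::

    import torch
    from transformers import XLNetLMHeadModel, XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
    model = XLNetLMHeadModel.from_pretrained('xlnet-base-cased')

    input_ids = torch.tensor([tokenizer.encode("Hello, my dog is very <mask>")])
    seq_len = input_ids.shape[1]

    # no token may attend to the last position: it is the prediction target
    perm_mask = torch.zeros((1, seq_len, seq_len))
    perm_mask[:, :, -1] = 1.0

    # predict only the last position
    target_mapping = torch.zeros((1, 1, seq_len))
    target_mapping[0, 0, -1] = 1.0

    outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
    next_token_logits = outputs[0]  # shape (1, 1, vocab_size)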
|
||||
|
||||
|
||||
XLNetConfig
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetConfig
|
||||
:members:
|
||||
|
||||
|
||||
XLNetTokenizer
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetTokenizer
|
||||
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
|
||||
create_token_type_ids_from_sequences, save_vocabulary
|
||||
|
||||
|
||||
XLNetModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetModel
|
||||
:members:
|
||||
|
||||
|
||||
XLNetLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
XLNetForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
XLNetForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
XLNetForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
XLNetForQuestionAnsweringSimple
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetForQuestionAnsweringSimple
|
||||
:members:
|
||||
|
||||
|
||||
XLNetForQuestionAnswering
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.XLNetForQuestionAnswering
|
||||
:members:
|
||||
|
||||
|
||||
TFXLNetModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLNetModel
|
||||
:members:
|
||||
|
||||
|
||||
TFXLNetLMHeadModel
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLNetLMHeadModel
|
||||
:members:
|
||||
|
||||
|
||||
TFXLNetForSequenceClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLNetForSequenceClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFXLNetForMultipleChoice
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLNetForMultipleChoice
|
||||
:members:
|
||||
|
||||
|
||||
TFXLNetForTokenClassification
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLNetForTokenClassification
|
||||
:members:
|
||||
|
||||
|
||||
TFXLNetForQuestionAnsweringSimple
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: transformers.TFXLNetForQuestionAnsweringSimple
|
||||
:members:
|
||||
|
||||
209  docs/source/model_sharing.rst  Normal file
@ -0,0 +1,209 @@
|
||||
Model sharing and uploading
|
||||
===========================
|
||||
|
||||
In this page, we will show you how to share a model you have trained or fine-tuned on new data with the community on
|
||||
the `model hub <https://huggingface.co/models>`__.
|
||||
|
||||
.. note::
|
||||
|
||||
You will need to create an account on `huggingface.co <https://huggingface.co/join>`__ for this.
|
||||
|
||||
Optionally, you can join an existing organization or create a new one.
|
||||
|
||||
Prepare your model for uploading
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
We have seen in the :doc:`training tutorial <training>` how to fine-tune a model on a given task. You have probably
|
||||
done something similar on your task, either using the model directly in your own training loop or using the
|
||||
:class:`~.transformers.Trainer`/:class:`~.transformers.TFTrainer` class. Let's see how you can share the result on
|
||||
the `model hub <https://huggingface.co/models>`__.
|
||||
|
||||
Basic steps
|
||||
^^^^^^^^^^^
|
||||
|
||||
..
|
||||
When #5258 is merged, we can remove the need to create the directory.
|
||||
|
||||
First, pick a directory with the name you want your model to have on the model hub (its full name will then be
|
||||
`username/awesome-name-you-picked` or `organization/awesome-name-you-picked`) and create it with either
|
||||
|
||||
::
|
||||
|
||||
mkdir path/to/awesome-name-you-picked
|
||||
|
||||
or in python
|
||||
|
||||
::
|
||||
|
||||
import os
|
||||
os.makedirs("path/to/awesome-name-you-picked")
|
||||
|
||||
then you can save your model and tokenizer with:
|
||||
|
||||
::
|
||||
|
||||
model.save_pretrained("path/to/awesome-name-you-picked")
|
||||
tokenizer.save_pretrained("path/to/awesome-name-you-picked")
|
||||
|
||||
Or, if you're using the Trainer API
|
||||
|
||||
::
|
||||
|
||||
trainer.save_model("path/to/awesome-name-you-picked")
|
||||
tokenizer.save_pretrained("path/to/awesome-name-you-picked")
|
||||
|
||||
Make your model work on all frameworks
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
..
|
||||
TODO Sylvain: make this automatic during the upload
|
||||
|
||||
You probably have your favorite framework, but so will other users! That's why it's best to upload your model with both
|
||||
PyTorch `and` TensorFlow checkpoints to make it easier to use (if you skip this step, users will still be able to load
|
||||
your model in another framework, but it will be slower, as it will have to be converted on the fly). Don't worry, it's super easy to do (and in a future version,
|
||||
it will all be automatic). You will need to install both PyTorch and TensorFlow for this step, but you don't need to
|
||||
worry about the GPU, so it should be very easy. Check the
|
||||
`TensorFlow installation page <https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available>`__
|
||||
and/or the `PyTorch installation page <https://pytorch.org/get-started/locally/#start-locally>`__ to see how.
|
||||
|
||||
First check that your model class exists in the other framework, that is try to import the same model by either adding
|
||||
or removing TF. For instance, if you trained a :class:`~transformers.DistilBertForSequenceClassification`, try to
|
||||
type
|
||||
|
||||
::
|
||||
|
||||
from transformers import TFDistilBertForSequenceClassification
|
||||
|
||||
and if you trained a :class:`~transformers.TFDistilBertForSequenceClassification`, try to
|
||||
type
|
||||
|
||||
::
|
||||
|
||||
from transformers import DistilBertForSequenceClassification
|
||||
|
||||
This will give back an error if your model does not exist in the other framework (something that should be pretty rare
|
||||
since we're aiming for full parity between the two frameworks). In this case, skip this and go to the next step.
|
||||
|
||||
Now, if you trained your model in PyTorch and have to create a TensorFlow version, adapt the following code to your
|
||||
model class:
|
||||
|
||||
::
|
||||
|
||||
tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True)
|
||||
tf_model.save_pretrained("path/to/awesome-name-you-picked")
|
||||
|
||||
and if you trained your model in TensorFlow and have to create a PyTorch version, adapt the following code to your
|
||||
model class:
|
||||
|
||||
::
|
||||
|
||||
pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True)
|
||||
pt_model.save_pretrained("path/to/awesome-name-you-picked")
|
||||
|
||||
That's all there is to it!
|
||||
|
||||
Check the directory before uploading
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Make sure there are no garbage files in the directory you'll upload. It should only have:
|
||||
|
||||
- a `config.json` file, which saves the :doc:`configuration <main_classes/configuration>` of your model ;
|
||||
- a `pytorch_model.bin` file, which is the PyTorch checkpoint (unless you can't have it for some reason) ;
|
||||
- a `tf_model.h5` file, which is the TensorFlow checkpoint (unless you can't have it for some reason) ;
|
||||
- a `special_tokens_map.json`, which is part of your :doc:`tokenizer <main_classes/tokenizer>` save;
|
||||
- a `tokenizer_config.json`, which is part of your :doc:`tokenizer <main_classes/tokenizer>` save;
|
||||
- a `vocab.txt`, which is the vocabulary of your tokenizer, part of your :doc:`tokenizer <main_classes/tokenizer>`
|
||||
save;
|
||||
- maybe a `added_tokens.json`, which is part of your :doc:`tokenizer <main_classes/tokenizer>` save.
|
||||
|
||||
Other files can safely be deleted.
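As a quick sanity check, you can compare the folder contents against that list before uploading (a small helper sketch, reusing the hypothetical directory name from above):

::

    import os

    expected = {
        "config.json", "pytorch_model.bin", "tf_model.h5",
        "special_tokens_map.json", "tokenizer_config.json", "vocab.txt",
    }
    present = set(os.listdir("path/to/awesome-name-you-picked"))

    print("unexpected files:", present - expected - {"added_tokens.json"})
    print("missing files:", expected - present)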
|
||||
|
||||
Upload your model with the CLI
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Now go into a terminal and run the following command. It should be run in the virtual environment where you installed 🤗
Transformers, since the :obj:`transformers-cli` command comes from the library.
|
||||
|
||||
::
|
||||
|
||||
transformers-cli login
|
||||
|
||||
Then log in using the same credentials as on huggingface.co. To upload your model, just type
|
||||
|
||||
::
|
||||
|
||||
transformers-cli upload path/to/awesome-name-you-picked/
|
||||
|
||||
This will upload the folder containing the weights, tokenizer and configuration we prepared in the previous section.
|
||||
|
||||
If you want to upload a single file (a new version of your model, or the other framework checkpoint you want to add),
|
||||
just type:
|
||||
|
||||
::
|
||||
|
||||
transformers-cli upload path/to/awesome-name-you-picked/that-file
|
||||
|
||||
or
|
||||
|
||||
::
|
||||
|
||||
transformers-cli upload path/to/awesome-name-you-picked/that-file --filename awesome-name-you-picked/new_name
|
||||
|
||||
if you want to change its filename.
|
||||
|
||||
This uploads the model to your personal account. If you want your model to be namespaced by your organization name
|
||||
rather than your username, add the following flag to any command:
|
||||
|
||||
::
|
||||
|
||||
--organization organization_name
|
||||
|
||||
so for instance:
|
||||
|
||||
::
|
||||
|
||||
transformers-cli upload path/to/awesome-name-you-picked/ --organization organization_name
|
||||
|
||||
Your model will then be accessible through its identifier, which is, as we saw above,
|
||||
`username/awesome-name-you-picked` or `organization/awesome-name-you-picked`.
|
||||
|
||||
Add a model card
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
To make sure everyone knows what your model can do, and what its limitations, potential biases or ethical
considerations are, please add a README.md model card to the 🤗 Transformers repo under `model_cards/`. It should be named
|
||||
`README.md` and follow `this template <https://github.com/huggingface/model_card>`__.
|
||||
|
||||
If your model is fine-tuned from another model coming from the model hub (all 🤗 Transformers pretrained models do),
|
||||
don't forget to link to its model card so that people can fully trace how your model was built.
|
||||
|
||||
If you have never made a pull request to the 🤗 Transformers repo, look at the
|
||||
:doc:`contributing guide <contributing>` to see the steps to follow.
|
||||
|
||||
Using your model
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
Your model now has a page on huggingface.co/models 🔥
|
||||
|
||||
Anyone can load it from code:
|
||||
|
||||
::
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("namespace/awesome-name-you-picked")
|
||||
model = AutoModel.from_pretrained("namespace/awesome-name-you-picked")
|
||||
|
||||
Additional commands
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
You can list all the files you uploaded on the hub like this:
|
||||
|
||||
::
|
||||
|
||||
transformers-cli s3 ls
|
||||
|
||||
You can also delete unneeded files with
|
||||
|
||||
::
|
||||
|
||||
transformers-cli s3 rm awesome-name-you-picked/filename
|
||||
|
||||
618  docs/source/model_summary.rst  Normal file
@ -0,0 +1,618 @@
|
||||
Summary of the models
|
||||
================================================
|
||||
|
||||
This is a summary of the models available in 🤗 Transformers. It assumes you’re familiar with the original
|
||||
`transformer model <https://arxiv.org/abs/1706.03762>`_. For a gentle introduction check the `annotated transformer
|
||||
<http://nlp.seas.harvard.edu/2018/04/03/attention.html>`_. Here we focus on the high-level differences between the
|
||||
models. You can check them more in detail in their respective documentation. Also checkout the
|
||||
:doc:`pretrained model page </pretrained_models>` to see the checkpoints available for each type of model and all `the
|
||||
community models <https://huggingface.co/models>`_.
|
||||
|
||||
Each one of the models in the library falls into one of the following categories:
|
||||
|
||||
* :ref:`autoregressive-models`
|
||||
* :ref:`autoencoding-models`
|
||||
* :ref:`seq-to-seq-models`
|
||||
* :ref:`multimodal-models`
|
||||
|
||||
Autoregressive models are pretrained on the classic language modeling task: guess the next token having read all the
|
||||
previous ones. They correspond to the decoder of the original transformer model, and a mask is used on top of the full
|
||||
sentence so that the attention heads can only see what was before in the text, and not what's after. Although those
|
||||
models can be fine-tuned and achieve great results on many tasks, the most natural application is text generation.
|
||||
A typical example of such models is GPT.
|
||||
|
||||
Autoencoding models are pretrained by corrupting the input tokens in some way and trying to reconstruct the original
|
||||
sentence. They correspond to the encoder of the original transformer model in the sense that they get access to the
|
||||
full inputs without any mask. Those models usually build a bidirectional representation of the whole sentence. They can
|
||||
be fine-tuned and achieve great results on many tasks such as text generation, but their most natural application is
|
||||
sentence classification or token classification. A typical example of such models is BERT.
|
||||
|
||||
Note that the only difference between autoregressive models and autoencoding models is in the way the model is
|
||||
pretrained. Therefore, the same architecture can be used for both autoregressive and autoencoding models. When a given
|
||||
model has been used for both types of pretraining, we have put it in the category corresponding to the article where it was first
|
||||
introduced.
|
||||
|
||||
Sequence-to-sequence models use both the encoder and the decoder of the original transformer, either for translation
|
||||
tasks or by transforming other tasks to sequence-to-sequence problems. They can be fine-tuned to many tasks but their
|
||||
most natural applications are translation, summarization and question answering. The original transformer model is an
|
||||
example of such a model (only for translation), T5 is an example that can be fine-tuned on other tasks.
|
||||
|
||||
Multimodal models mix text inputs with other kinds (like image) and are more specific to a given task.
|
||||
|
||||
.. _autoregressive-models:
|
||||
|
||||
Autoregressive models
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
As mentioned before, these models rely on the decoder part of the original transformer and use an attention mask so
|
||||
that at each position, the model can only look at the tokens before in the attention heads.
|
||||
|
||||
Original GPT
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=openai-gpt">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-openai--gpt-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/gpt">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-openai--gpt-blueviolet">
|
||||
</a>
|
||||
|
||||
`Improving Language Understanding by Generative Pre-Training <https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf>`_,
|
||||
Alec Radford et al.
|
||||
|
||||
The first autoregressive model based on the transformer architecture, pretrained on the Book Corpus dataset.
|
||||
|
||||
The library provides versions of the model for language modeling and multitask language modeling/multiple choice
|
||||
classification.
|
||||
|
||||
GPT-2
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=gpt2">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-gpt2-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/gpt2">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-gpt2-blueviolet">
|
||||
</a>
|
||||
|
||||
`Language Models are Unsupervised Multitask Learners <https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf>`_,
|
||||
Alec Radford et al.
|
||||
|
||||
A bigger and better version of GPT, pretrained on WebText (web pages from outgoing links in Reddit with 3 karmas or
|
||||
more).
|
||||
|
||||
The library provides versions of the model for language modeling and multitask language modeling/multiple choice
|
||||
classification.
|
||||
|
||||
CTRL
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=ctrl">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-ctrl-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/ctrl">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-ctrl-blueviolet">
|
||||
</a>
|
||||
|
||||
`CTRL: A Conditional Transformer Language Model for Controllable Generation <https://arxiv.org/abs/1909.05858>`_,
|
||||
Nitish Shirish Keskar et al.
|
||||
|
||||
Same as the GPT model but adds the idea of control codes. Text is generated from a prompt (can be empty) and one (or
|
||||
several) of those control codes which are then used to influence the text generation: generate with the style of
|
||||
a Wikipedia article, a book or a movie review.
|
||||
|
||||
The library provides a version of the model for language modeling only.
|
||||
|
||||
Transformer-XL
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=transfo-xl">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/transformerxl">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-transfo--xl-blueviolet">
|
||||
</a>
|
||||
|
||||
`Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`_,
|
||||
Zihang Dai et al.
|
||||
|
||||
Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular
RNN with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that
|
||||
may span across multiple documents, and segments are fed in order to the model.
|
||||
|
||||
Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention
|
||||
scores. This allows the model to pay attention to information that was in the previous segment as well as the current
|
||||
one. By stacking multiple attention layers, the receptive field can be increased to multiple previous segments.
|
||||
|
||||
This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would
|
||||
give the same results in the current input and the current hidden state at a given position) and needs to make some
|
||||
adjustments in the way attention scores are computed.
|
||||
|
||||
The library provides a version of the model for language modeling only.
|
||||
|
||||
.. _reformer:
|
||||
|
||||
Reformer
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=reformer">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-reformer-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/reformer">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-reformer-blueviolet">
|
||||
</a>
|
||||
|
||||
`Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451>`_,
|
||||
Nikita Kitaev et al.
|
||||
|
||||
An autoregressive transformer model with lots of tricks to reduce memory footprint and compute time. Those tricks
|
||||
include:
|
||||
|
||||
* Use :ref:`Axial position encoding <axial-pos-encoding>` (see below for more details). It’s a mechanism to avoid
|
||||
having a huge positional encoding matrix (when the sequence length is very big) by factorizing it in smaller
|
||||
matrices.
|
||||
* Replace traditional attention by :ref:`LSH (locality-sensitive hashing) attention <lsh-attention>` (see below for more
details). It's a technique to avoid computing the full query-key product in the attention layers.
|
||||
* Avoid storing the intermediate results of each layer by using reversible transformer layers to obtain them during
|
||||
the backward pass (subtracting the residuals from the input of the next layer gives them back) or recomputing them
|
||||
for results inside a given layer (less efficient than storing them but saves memory).
|
||||
* Compute the feedforward operations by chunks and not on the whole batch.
|
||||
|
||||
With those tricks, the model can be fed much larger sentences than traditional transformer autoregressive models.
|
||||
|
||||
**Note:** This model could very well be used in an autoencoding setting; there is no checkpoint for such a
pretraining yet, though.
|
||||
|
||||
The library provides a version of the model for language modeling only.
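For example, the chunked feed-forward trick from the list above is easy to illustrate in isolation (a toy sketch with a random weight matrix, not the library's implementation):

::

    import torch

    def chunked_ffn(hidden_states, weight, chunk_size=64):
        # process the sequence dimension chunk by chunk to cap peak memory
        chunks = hidden_states.split(chunk_size, dim=1)
        return torch.cat([chunk @ weight for chunk in chunks], dim=1)

    hidden_states = torch.randn(1, 1024, 256)   # (batch, seq_len, hidden)
    weight = torch.randn(256, 1024)             # toy feed-forward weight
    out = chunked_ffn(hidden_states, weight)    # same result as hidden_states @ weight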
|
||||
|
||||
XLNet
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=xlnet">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlnet-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/xlnet">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xlnet-blueviolet">
|
||||
</a>
|
||||
|
||||
`XLNet: Generalized Autoregressive Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`_,
|
||||
Zhilin Yang et al.
|
||||
|
||||
XLNet is not a traditional autoregressive model but uses a training strategy that builds on that. It permutes the
|
||||
tokens in the sentence, then allows the model to use the last n tokens to predict the token n+1. Since this is all done
|
||||
with a mask, the sentence is actually fed in the model in the right order, but instead of masking the first n tokens
|
||||
for n+1, XLNet uses a mask that hides the previous tokens in some given permutation of 1,...,sequence length.
|
||||
|
||||
XLNet also uses the same recurrence mechanism as TransformerXL to build long-term dependencies.
|
||||
|
||||
The library provides a version of the model for language modeling, token classification, sentence classification,
|
||||
multiple choice classification and question answering.
|
||||
|
||||
.. _autoencoding-models:
|
||||
|
||||
Autoencoding models
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
As mentioned before, these models rely on the encoder part of the original transformer and use no mask so the model can
|
||||
look at all the tokens in the attention heads. For pretraining, inputs are a corrupted version of the sentence, usually
|
||||
obtained by masking tokens, and targets are the original sentences.
|
||||
|
||||
BERT
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=bert">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-bert-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/bert">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-bert-blueviolet">
|
||||
</a>
|
||||
|
||||
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`_,
|
||||
Jacob Devlin et al.
|
||||
|
||||
Corrupts the inputs by using random masking, more precisely, during pretraining, a given percentage of tokens (usually
|
||||
15%) are masked by
|
||||
|
||||
* a special mask token with probability 0.8
|
||||
* a random token different from the one masked with probability 0.1
|
||||
* the same token with probability 0.1
|
||||
|
||||
The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a
|
||||
separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50%
|
||||
they are not related. The model has to predict if the sentences are consecutive or not.
|
||||
|
||||
The library provides a version of the model for language modeling (traditional or masked), next sentence prediction,
|
||||
token classification, sentence classification, multiple choice classification and question answering.
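A toy sketch of that 80%/10%/10% corruption rule on a list of token ids (a simplified illustration, not the library's data collator; ``mask_token_id`` and ``vocab_size`` are placeholders):

::

    import random

    def corrupt(tokens, mask_token_id, vocab_size, mlm_prob=0.15):
        inputs, labels = list(tokens), [-100] * len(tokens)  # -100 marks positions that are not predicted
        for i, token in enumerate(tokens):
            if random.random() < mlm_prob:
                labels[i] = token  # the model must recover the original token here
                draw = random.random()
                if draw < 0.8:
                    inputs[i] = mask_token_id                 # 80%: special mask token
                elif draw < 0.9:
                    inputs[i] = random.randrange(vocab_size)  # 10%: random token
                # remaining 10%: token left unchanged
        return inputs, labels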
|
||||
|
||||
ALBERT
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=albert">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/albert">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-albert-blueviolet">
|
||||
</a>
|
||||
|
||||
`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations <https://arxiv.org/abs/1909.11942>`_,
|
||||
Zhenzhong Lan et al.
|
||||
|
||||
Same as BERT but with a few tweaks:
|
||||
|
||||
* Embedding size E is different from hidden size H justified because the embeddings are context independent (one
|
||||
embedding vector represents one token) whereas hidden states are context dependent (one hidden state represents a
|
||||
sequence of tokens) so it's more logical to have H >> E. Also, the embedding matrix is large since it's V x E (V
being the vocab size). If E < H, it has fewer parameters.
|
||||
* Layers are split in groups that share parameters (to save memory).
|
||||
* Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B
|
||||
(that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have
|
||||
been swapped or not.
|
||||
|
||||
The library provides a version of the model for masked language modeling, token classification, sentence
|
||||
classification, multiple choice classification and question answering.
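The effect of the factorized embedding is easy to check with a back-of-the-envelope count (the vocabulary and hidden sizes below are just an illustration):

::

    V, H, E = 30000, 768, 128

    full_embedding = V * H        # a single V x H embedding matrix
    factorized = V * E + E * H    # V x E embeddings plus an E x H projection

    print(full_embedding)  # 23040000
    print(factorized)      # 3938304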
|
||||
|
||||
RoBERTa
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=roberta">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-roberta-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/roberta">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-roberta-blueviolet">
|
||||
</a>
|
||||
|
||||
`RoBERTa: A Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`_,
|
||||
Yinhan Liu et al.
|
||||
|
||||
Same as BERT with better pretraining tricks:
|
||||
|
||||
* dynamic masking: tokens are masked differently at each epoch whereas BERT does it once and for all
|
||||
* no NSP (next sentence prediction) loss and instead of putting just two sentences together, put a chunk of
|
||||
contiguous texts together to reach 512 tokens (so the sentences are in an order that may span several documents)
|
||||
* train with larger batches
|
||||
* use BPE with bytes as a subunit and not characters (because of unicode characters)
|
||||
|
||||
The library provides a version of the model for masked language modeling, token classification, sentence
|
||||
classification, multiple choice classification and question answering.
|
||||
|
||||
DistilBERT
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=distilbert">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-distilbert-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/distilbert">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-distilbert-blueviolet">
|
||||
</a>
|
||||
|
||||
`DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter <https://arxiv.org/abs/1910.01108>`_,
|
||||
Victor Sanh et al.
|
||||
|
||||
Same as BERT but smaller. Trained by distillation of the pretrained BERT model, meaning it's been trained to predict
|
||||
the same probabilities as the larger model. The actual objective is a combination of:
|
||||
|
||||
* finding the same probabilities as the teacher model
|
||||
* predicting the masked tokens correctly (but no next-sentence objective)
|
||||
* a cosine similarity between the hidden states of the student and the teacher model
|
||||
|
||||
The library provides a version of the model for masked language modeling, token classification, sentence classification
|
||||
and question answering.
|
||||
|
||||
XLM
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=xlm">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/xlm">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xlm-blueviolet">
|
||||
</a>
|
||||
|
||||
`Cross-lingual Language Model Pretraining <https://arxiv.org/abs/1901.07291>`_, Guillaume Lample and Alexis Conneau
|
||||
|
||||
A transformer model trained on several languages. There are three different type of training for this model and the
|
||||
library provides checkpoints for all of them:
|
||||
|
||||
* Causal language modeling (CLM) which is the traditional autoregressive training (so this model could be in the
|
||||
previous section as well). One of the languages is selected for each training sample, and the model input is a
|
||||
sentence of 256 tokens that may span several documents in one of those languages.
|
||||
* Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample,
|
||||
and the model input is a sentence of 256 tokens that may span several documents in one of those languages, with
|
||||
dynamic masking of the tokens.
|
||||
* A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two
|
||||
different languages, with random masking. To predict one of the masked tokens, the model can use both the
|
||||
surrounding context in language 1 as well as the context given by language 2.
|
||||
|
||||
Checkpoints refer to which method was used for pretraining by having `clm`, `mlm` or `mlm-tlm` in their names. On top
|
||||
of positional embeddings, the model has language embeddings. When training using MLM/CLM, this gives the model an
|
||||
indication of the language used, and when training using MLM+TLM, an indication of which part of the input is in which
|
||||
language.
|
||||
|
||||
The library provides a version of the model for language modeling, token classification, sentence classification and
|
||||
question answering.
|
||||
|
||||
XLM-RoBERTa
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=xlm-roberta">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm--roberta-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/xlmroberta">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-xlm--roberta-blueviolet">
|
||||
</a>
|
||||
|
||||
`Unsupervised Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`_, Alexis Conneau et
|
||||
al.
|
||||
|
||||
Uses RoBERTa tricks on the XLM approach, but does not use the translation language modeling objective, only using
|
||||
masked language modeling on sentences coming from one language. However, the model is trained on many more languages
|
||||
(100) and doesn't use the language embeddings, so it's capable of detecting the input language by itself.
|
||||
|
||||
The library provides a version of the model for masked language modeling, token classification, sentence
|
||||
classification, multiple choice classification and question answering.
|
||||
|
||||
FlauBERT
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=flaubert">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-flaubert-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/flaubert">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-flaubert-blueviolet">
|
||||
</a>
|
||||
|
||||
`FlauBERT: Unsupervised Language Model Pre-training for French <https://arxiv.org/abs/1912.05372>`_, Hang Le et al.
|
||||
|
||||
Like RoBERTa, without the sentence ordering prediction (so just trained on the MLM objective).
|
||||
|
||||
The library provides a version of the model for language modeling and sentence classification.
|
||||
|
||||
ELECTRA
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=electra">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-electra-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/electra">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-electra-blueviolet">
|
||||
</a>
|
||||
|
||||
`ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators <https://arxiv.org/abs/2003.10555>`_,
|
||||
Kevin Clark et al.
|
||||
|
||||
ELECTRA is a transformer model pretrained with the use of another (small) masked language model. The inputs are
|
||||
corrupted by that language model, which takes an input text that is randomly masked and outputs a text in which ELECTRA
|
||||
has to predict which token is an original and which one has been replaced. Like for GAN training, the small language
|
||||
model is trained for a few steps (but with the original texts as objective, not to fool the ELECTRA model like in a
|
||||
traditional GAN setting) then the ELECTRA model is trained for a few steps.
|
||||
|
||||
The library provides a version of the model for masked language modeling, token classification and sentence
|
||||
classification.
|
||||
|
||||
.. _longformer:
|
||||
|
||||
Longformer
|
||||
----------------------------------------------
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<a href="https://huggingface.co/models?filter=longformer">
|
||||
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-longformer-blueviolet">
|
||||
</a>
|
||||
<a href="/model_doc/longformer">
|
||||
<img alt="Doc" src="https://img.shields.io/badge/Model_documentation-longformer-blueviolet">
|
||||
</a>
|
||||
|
||||
`Longformer: The Long-Document Transformer <https://arxiv.org/abs/2004.05150>`_, Iz Beltagy et al.
|
||||
|
||||
A transformer model replacing the attention matrices by sparse matrices to go faster. Often, the local context (e.g.,
|
||||
what are the two tokens left and right?) is enough to take action for a given token. Some preselected input tokens are
|
||||
still given global attention, but the attention matrix has way fewer entries, resulting in a speed-up. See the
|
||||
:ref:`local attention section <local-attention>` for more information.
|
||||
|
||||
It is pretrained the same way a RoBERTa otherwise.
|
||||
|
||||
**Note:** This model could be very well be used in an autoregressive setting, there is no checkpoint for such a
|
||||
pretraining yet, though.
|
||||
|
||||
The library provides a version of the model for masked language modeling, token classification, sentence
classification, multiple choice classification and question answering.

.. _seq-to-seq-models:

Sequence-to-sequence models
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

As mentioned before, these models keep both the encoder and the decoder of the original transformer.

BART
----------------------------------------------

.. raw:: html

    <a href="https://huggingface.co/models?filter=bart">
        <img alt="Models" src="https://img.shields.io/badge/All_model_pages-bart-blueviolet">
    </a>
    <a href="/model_doc/bart">
        <img alt="Doc" src="https://img.shields.io/badge/Model_documentation-bart-blueviolet">
    </a>

`BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension
<https://arxiv.org/abs/1910.13461>`_, Mike Lewis et al.

Sequence-to-sequence model with an encoder and a decoder. The encoder is fed a corrupted version of the tokens, the
decoder is fed the original tokens (but has a mask to hide the future words, like a regular transformer decoder). On
the pretraining tasks, a composition of the following transformations is applied to the encoder input (a sketch of
these corruptions follows the list):

* mask random tokens (like in BERT)
* delete random tokens
* mask a span of k tokens with a single mask token (a span of 0 tokens is an insertion of a mask token)
* permute sentences
* rotate the document so that it starts at a specific token

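As a toy illustration of these corruptions (not the actual BART preprocessing pipeline, and operating on plain string
tokens rather than subword ids), one could write:

::

    import random

    def corrupt(tokens, mask_token="<mask>"):
        out = list(tokens)
        # mask a random token (like in BERT)
        out[random.randrange(len(out))] = mask_token
        # delete a random token
        del out[random.randrange(len(out))]
        # mask a span of k tokens (possibly k=0) with a single mask token
        start = random.randrange(len(out))
        k = random.randint(0, 2)
        out[start:start + k] = [mask_token]
        # rotate the document so that it starts at a random token
        # (sentence permutation is omitted since this example is a single sentence)
        pivot = random.randrange(len(out))
        return out[pivot:] + out[:pivot]

    print(corrupt("My dog is very cute .".split()))
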
The library provides a version of this model for conditional generation and sequence classification.

MarianMT
----------------------------------------------

.. raw:: html

    <a href="https://huggingface.co/models?filter=marian">
        <img alt="Models" src="https://img.shields.io/badge/All_model_pages-marian-blueviolet">
    </a>
    <a href="/model_doc/marian">
        <img alt="Doc" src="https://img.shields.io/badge/Model_documentation-marian-blueviolet">
    </a>

`Marian: Fast Neural Machine Translation in C++ <https://arxiv.org/abs/1804.00344>`_, Marcin Junczys-Dowmunt et al.

A framework for translation models, using the same architecture as BART.

The library provides a version of this model for conditional generation.

T5
----------------------------------------------

.. raw:: html

    <a href="https://huggingface.co/models?filter=t5">
        <img alt="Models" src="https://img.shields.io/badge/All_model_pages-t5-blueviolet">
    </a>
    <a href="/model_doc/t5">
        <img alt="Doc" src="https://img.shields.io/badge/Model_documentation-t5-blueviolet">
    </a>

`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`_,
Colin Raffel et al.

Uses the traditional transformer model (except for a slight change with the positional embeddings, which are learned at
each layer). To be able to operate on all NLP tasks, it transforms them into text-to-text problems by using certain
prefixes: “summarize: …”, “question: …”, “translate English to German: …” and so forth.

The pretraining includes both supervised and self-supervised training. Supervised training is conducted on downstream
tasks provided by the GLUE and SuperGLUE benchmarks (converting them to text-to-text tasks as explained above).

Self-supervised training uses corrupted tokens, obtained by randomly removing 15% of the tokens and replacing them with
individual sentinel tokens (if several consecutive tokens are marked for removal, the whole group is replaced by a
single sentinel token). The input of the encoder is the corrupted sentence, the input of the decoder is the original
sentence and the target is then the dropped-out tokens delimited by their sentinel tokens.

For instance, if we have the sentence “My dog is very cute .”, and we decide to remove the tokens “dog”, “is” and
“cute”, the encoder input becomes “My <x> very <y> .” and the target is “<x> dog is <y> cute <z>”.

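A rough sketch of that sentinel-token corruption (assuming word-level tokens for readability; the real T5 preprocessing
works on SentencePiece subwords and uses its own sentinel vocabulary):

::

    def corrupt_for_t5(tokens, drop_indices, sentinels=("<x>", "<y>", "<z>")):
        inputs, targets, s, i = [], [], 0, 0
        while i < len(tokens):
            if i in drop_indices:
                inputs.append(sentinels[s])
                targets.append(sentinels[s])
                while i < len(tokens) and i in drop_indices:  # consecutive drops share one sentinel
                    targets.append(tokens[i])
                    i += 1
                s += 1
            else:
                inputs.append(tokens[i])
                i += 1
        targets.append(sentinels[s])  # closing sentinel
        return " ".join(inputs), " ".join(targets)

    print(corrupt_for_t5("My dog is very cute .".split(), {1, 2, 4}))
    # ('My <x> very <y> .', '<x> dog is <y> cute <z>')
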
The library provides a version of this model for conditional generation.

.. _multimodal-models:

Multimodal models
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There is one multimodal model in the library which has not been pretrained in the self-supervised fashion like the
others.

MMBT
----------------------------------------------

`Supervised Multimodal Bitransformers for Classifying Images and Text <https://arxiv.org/abs/1909.02950>`_, Douwe Kiela
et al.

A transformers model used in multimodal settings, combining a text and an image to make predictions. The transformer
model takes as inputs the embeddings of the tokenized text and the final activations of a pretrained ResNet on the
images (after the pooling layer), which go through a linear layer (to go from the number of features at the end of the
ResNet to the hidden state dimension of the transformer).

The different inputs are concatenated, and on top of the positional embeddings, a segment embedding is added to let the
model know which part of the input vector corresponds to the text and which to the image.

The pretrained model only works for classification.

..
    More information in this :doc:`model documentation </model_doc/mmbt>`.
    TODO: write this page

More technical aspects
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Full vs sparse attention
----------------------------------------------

Most transformer models use full attention, in the sense that the attention matrix is square. This can be a big
computational bottleneck when you have long texts. Longformer and Reformer are models that try to be more efficient and
use a sparse version of the attention matrix to speed up training.

.. _lsh-attention:

**LSH attention**

:ref:`Reformer <reformer>` uses LSH attention. In :math:`softmax(QK^t)`, only the biggest elements (in the softmax
dimension) of the matrix :math:`QK^t` are going to give useful contributions. So for each query q in Q, we can consider
only the keys k in K that are close to q. A hash function is used to determine whether q and k are close. The attention
mask is modified to mask the current token (except at the first position), because it would give a query and a key that
are equal (so very similar to each other). Since the hash can be a bit random, several hash functions are used in
practice (determined by an ``n_rounds`` parameter) and they are then averaged together.

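A minimal sketch of the hashing step (random-rotation LSH with a single hash round), just to illustrate how positions
end up in buckets; the actual Reformer implementation is considerably more involved:

::

    import torch

    def lsh_buckets(vectors, n_buckets, seed=0):
        torch.manual_seed(seed)
        # Random rotations: nearby vectors tend to land in the same bucket.
        rotations = torch.randn(vectors.shape[-1], n_buckets // 2)
        projected = vectors @ rotations                               # (seq_len, n_buckets // 2)
        return torch.argmax(torch.cat([projected, -projected], dim=-1), dim=-1)

    qk = torch.randn(16, 64)                 # Reformer shares query and key vectors
    buckets = lsh_buckets(qk, n_buckets=8)   # attention is then restricted to same-bucket positions
    print(buckets)
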
.. _local-attention:

**Local attention**

:ref:`Longformer <longformer>` uses local attention: often, the local context (e.g., what are the two tokens to the
left and right?) is enough to take action for a given token. Also, by stacking attention layers that have a small
window, the last layer will have a receptive field of more than just the tokens in the window, allowing the model to
build a representation of the whole sentence.

Some preselected input tokens are also given global attention: for those few tokens, the attention matrix can access
all tokens, and this process is symmetric: all other tokens have access to those specific tokens (on top of the ones in
their local window). This is shown in Figure 2d of the paper; see below for a sample attention mask:

.. image:: imgs/local_attention_mask.png
    :scale: 50 %
    :align: center

Using those attention matrices with fewer nonzero entries then allows the model to handle inputs with a bigger sequence
length.

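A small sketch of that attention pattern (illustrative only, not the Longformer implementation): a sliding local window
plus a few symmetric global positions.

::

    import torch

    def local_global_mask(seq_len, window, global_positions):
        idx = torch.arange(seq_len)
        mask = (idx[None, :] - idx[:, None]).abs() <= window  # local band around the diagonal
        for g in global_positions:
            mask[g, :] = True  # the global token attends to everything...
            mask[:, g] = True  # ...and everything attends to the global token
        return mask

    print(local_global_mask(seq_len=8, window=1, global_positions=[0]).int())
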
Other tricks
----------------------------------------------

.. _axial-pos-encoding:

**Axial positional encodings**

:ref:`Reformer <reformer>` uses axial positional encodings: in traditional transformer models, the positional encoding
E is a matrix of size :math:`l` by :math:`d`, :math:`l` being the sequence length and :math:`d` the dimension of the
hidden state. If you have very long texts, this matrix can be huge and take way too much space on the GPU.

To alleviate that, axial positional encodings consist in factorizing that big matrix E into two smaller matrices E1 and
E2, with dimensions :math:`l_{1} \times d_{1}` and :math:`l_{2} \times d_{2}`, such that :math:`l_{1} \times l_{2} = l`
and :math:`d_{1} + d_{2} = d` (with the product for the lengths, this ends up being way smaller). The embedding for
time step :math:`j` in E is obtained by concatenating the embeddings for timestep :math:`j \% l_{1}` in E1 and
:math:`j // l_{1}` in E2.

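A minimal sketch of that factorization (with arbitrary small dimensions, assuming :math:`l = l_{1} \times l_{2}` and
:math:`d = d_{1} + d_{2}` as above):

::

    import torch

    l1, l2, d1, d2 = 4, 8, 3, 5        # l = 32 positions, d = 8 hidden dimensions
    E1 = torch.randn(l1, d1)
    E2 = torch.randn(l2, d2)

    def axial_position_embedding(j):
        # Embedding for timestep j: concatenate E1[j % l1] and E2[j // l1].
        return torch.cat([E1[j % l1], E2[j // l1]])

    print(axial_position_embedding(11).shape)  # torch.Size([8]), without storing a full 32 x 8 matrix
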
docs/source/multilingual.rst (new file)
@ -0,0 +1,117 @@

Multi-lingual models
================================================

Most of the models available in this library are mono-lingual models (English, Chinese and German). A few
multi-lingual models are available and have a different mechanism than mono-lingual models.
This page details the usage of these models.

The two models that currently support multiple languages are BERT and XLM.

XLM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

XLM has a total of 10 different checkpoints, only one of which is mono-lingual. The 9 remaining model checkpoints can
be split in two categories: the checkpoints that make use of language embeddings, and those that don't.

XLM & Language Embeddings
------------------------------------------------

This section concerns the following checkpoints:

- ``xlm-mlm-ende-1024`` (Masked language modeling, English-German)
- ``xlm-mlm-enfr-1024`` (Masked language modeling, English-French)
- ``xlm-mlm-enro-1024`` (Masked language modeling, English-Romanian)
- ``xlm-mlm-xnli15-1024`` (Masked language modeling, XNLI languages)
- ``xlm-mlm-tlm-xnli15-1024`` (Masked language modeling + Translation, XNLI languages)
- ``xlm-clm-enfr-1024`` (Causal language modeling, English-French)
- ``xlm-clm-ende-1024`` (Causal language modeling, English-German)

These checkpoints require language embeddings that will specify the language used at inference time. These language
embeddings are represented as a tensor that is of the same shape as the input ids passed to the model. The values in
these tensors depend on the language used and are identifiable using the ``lang2id`` and ``id2lang`` attributes of
the tokenizer.

Here is an example using the ``xlm-clm-enfr-1024`` checkpoint (Causal language modeling, English-French):

.. code-block::

    >>> import torch
    >>> from transformers import XLMTokenizer, XLMWithLMHeadModel

    >>> tokenizer = XLMTokenizer.from_pretrained("xlm-clm-enfr-1024")
    >>> model = XLMWithLMHeadModel.from_pretrained("xlm-clm-enfr-1024")

The different languages this model/tokenizer handles, as well as the ids of these languages, are visible using the
``lang2id`` attribute:

.. code-block::

    >>> print(tokenizer.lang2id)
    {'en': 0, 'fr': 1}

These ids should be used when passing a language parameter during a model pass. Let's define our inputs:

.. code-block::

    >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")])  # batch size of 1

We should now define the language embedding by using the previously defined language id. We want to create a tensor
filled with the appropriate language ids, of the same size as ``input_ids``. For English, the id is 0:

.. code-block::

    >>> language_id = tokenizer.lang2id['en']  # 0
    >>> langs = torch.tensor([language_id] * input_ids.shape[1])  # torch.tensor([0, 0, 0, ..., 0])

    >>> # We reshape it to be of size (batch_size, sequence_length)
    >>> langs = langs.view(1, -1)  # is now of shape [1, sequence_length] (we have a batch size of 1)

You can then feed it all as input to your model:

.. code-block::

    >>> outputs = model(input_ids, langs=langs)

The example `run_generation.py <https://github.com/huggingface/transformers/blob/master/examples/text-generation/run_generation.py>`__
can generate text using the CLM checkpoints from XLM, using the language embeddings.

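As a quick illustration of what that script does (a minimal greedy-decoding sketch built on the forward pass above and
on the tuple outputs of this version of the library, not a replacement for ``run_generation.py``):

.. code-block::

    >>> for _ in range(5):
    ...     logits = model(input_ids, langs=langs)[0]               # (batch, seq_len, vocab)
    ...     next_token = logits[0, -1].argmax().view(1, 1)          # greedy choice for the next token
    ...     input_ids = torch.cat([input_ids, next_token], dim=1)
    ...     langs = torch.cat([langs, langs[:, :1]], dim=1)         # keep the language ids aligned
    >>> print(tokenizer.decode(input_ids[0]))
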
XLM without Language Embeddings
------------------------------------------------

This section concerns the following checkpoints:

- ``xlm-mlm-17-1280`` (Masked language modeling, 17 languages)
- ``xlm-mlm-100-1280`` (Masked language modeling, 100 languages)

These checkpoints do not require language embeddings at inference time. These models are used to produce generic
sentence representations, unlike the previously-mentioned XLM checkpoints.

BERT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

BERT has two checkpoints that can be used for multi-lingual tasks:

- ``bert-base-multilingual-uncased`` (Masked language modeling + Next sentence prediction, 102 languages)
- ``bert-base-multilingual-cased`` (Masked language modeling + Next sentence prediction, 104 languages)

These checkpoints do not require language embeddings at inference time. They should identify the language
used in the context and infer accordingly.

XLM-RoBERTa
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

XLM-RoBERTa was trained on 2.5TB of newly created clean CommonCrawl data in 100 languages. It provides strong
gains over previously released multi-lingual models like mBERT or XLM on downstream tasks like classification,
sequence labeling and question answering.

Two XLM-RoBERTa checkpoints can be used for multi-lingual tasks:

- ``xlm-roberta-base`` (Masked language modeling, 100 languages)
- ``xlm-roberta-large`` (Masked language modeling, 100 languages)

docs/source/notebooks.md (symbolic link)
@ -0,0 +1 @@
../../notebooks/README.md

@ -1,16 +0,0 @@

Notebooks
================================================

We include `three Jupyter Notebooks <https://github.com/huggingface/transformers/tree/master/notebooks>`_ that can be used to check that the predictions of the PyTorch model are identical to the predictions of the original TensorFlow model.

*
  The first notebook (\ `Comparing-TF-and-PT-models.ipynb <https://github.com/huggingface/transformers/blob/master/notebooks/Comparing-TF-and-PT-models.ipynb>`_\ ) extracts the hidden states of a full sequence on each layer of the TensorFlow and the PyTorch models and computes the standard deviation between them. In the given example, we get a standard deviation of 1.5e-7 to 9e-7 on the various hidden states of the models.

*
  The second notebook (\ `Comparing-TF-and-PT-models-SQuAD.ipynb <https://github.com/huggingface/transformers/blob/master/notebooks/Comparing-TF-and-PT-models-SQuAD.ipynb>`_\ ) compares the loss computed by the TensorFlow and the PyTorch models for identical initialization of the fine-tuning layer of the ``BertForQuestionAnswering`` and computes the standard deviation between them. In the given example, we get a standard deviation of 2.5e-7 between the models.

*
  The third notebook (\ `Comparing-TF-and-PT-models-MLM-NSP.ipynb <https://github.com/huggingface/transformers/blob/master/notebooks/Comparing-TF-and-PT-models-MLM-NSP.ipynb>`_\ ) compares the predictions computed by the TensorFlow and the PyTorch models for masked token language modeling using the pre-trained masked language modeling model.

Please follow the instructions given in the notebooks to run and modify them.

docs/source/philosophy.rst (new file)
@ -0,0 +1,73 @@

Philosophy
==========

🤗 Transformers is an opinionated library built for:

- NLP researchers and educators seeking to use/study/extend large-scale transformers models
- hands-on practitioners who want to fine-tune those models and/or serve them in production
- engineers who just want to download a pretrained model and use it to solve a given NLP task.

The library was designed with two strong goals in mind:

- Be as easy and fast to use as possible:

  - We strongly limited the number of user-facing abstractions to learn; in fact, there are almost no abstractions,
    just three standard classes required to use each model: :doc:`configuration <main_classes/configuration>`,
    :doc:`models <main_classes/model>` and :doc:`tokenizer <main_classes/tokenizer>`.
  - All of these classes can be initialized in a simple and unified way from pretrained instances by using a common
    :obj:`from_pretrained()` instantiation method which will take care of downloading (if needed), caching and
    loading the related class instance and associated data (configurations' hyper-parameters, tokenizers' vocabulary,
    and models' weights) from a pretrained checkpoint provided on
    `Hugging Face Hub <https://huggingface.co/models>`__ or your own saved checkpoint.
  - On top of those three base classes, the library provides two APIs: :func:`~transformers.pipeline` for quickly
    using a model (plus its associated tokenizer and configuration) on a given task and
    :func:`~transformers.Trainer`/:func:`~transformers.TFTrainer` to quickly train or fine-tune a given model.
  - As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to
    extend/build upon the library, just use regular Python/PyTorch/TensorFlow/Keras modules and inherit from the base
    classes of the library to reuse functionalities like model loading/saving.

- Provide state-of-the-art models with performances as close as possible to the original models:

  - We provide at least one example for each architecture which reproduces a result provided by the official authors
    of said architecture.
  - The code is usually as close to the original code base as possible, which means some PyTorch code may be not as
    *pytorchic* as it could be as a result of being converted from TensorFlow code, and vice versa.

A few other goals:

- Expose the models' internals as consistently as possible:

  - We give access, using a single API, to the full hidden states and attention weights.
  - The tokenizers' and base models' APIs are standardized to easily switch between models.

- Incorporate a subjective selection of promising tools for fine-tuning/investigating these models:

  - A simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning.
  - Simple ways to mask and prune transformer heads.

- Switch easily between PyTorch and TensorFlow 2.0, allowing training with one framework and inference with another.

Main concepts
~~~~~~~~~~~~~

The library is built around three types of classes for each model:

- **Model classes** such as :class:`~transformers.BertModel`, which are 30+ PyTorch models
  (`torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__) or Keras models
  (`tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__) that work with the pretrained
  weights provided in the library.
- **Configuration classes** such as :class:`~transformers.BertConfig`, which store all the parameters required to build
  a model. You don't always need to instantiate these yourself. In particular, if you are using a pretrained model
  without any modification, creating the model will automatically take care of instantiating the configuration (which
  is part of the model).
- **Tokenizer classes** such as :class:`~transformers.BertTokenizer`, which store the vocabulary for each model and
  provide methods for encoding/decoding strings into lists of token embedding indices to be fed to a model.

All these classes can be instantiated from pretrained instances and saved locally using two methods:

- :obj:`from_pretrained()` lets you instantiate a model/configuration/tokenizer from a pretrained version either
  provided by the library itself (the supported models are provided in the list :doc:`here <pretrained_models>`)
  or stored locally (or on a server) by the user,
- :obj:`save_pretrained()` lets you save a model/configuration/tokenizer locally so that it can be reloaded using
  :obj:`from_pretrained()`.

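A minimal sketch of these two methods in action (using ``bert-base-cased`` as an arbitrary example checkpoint):

::

    from transformers import AutoModel, AutoTokenizer

    # Download (or load from cache) the pretrained tokenizer and model.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = AutoModel.from_pretrained("bert-base-cased")

    # Save both locally...
    tokenizer.save_pretrained("./my-bert")
    model.save_pretrained("./my-bert")

    # ...and reload them later from that directory instead of the Hub.
    tokenizer = AutoTokenizer.from_pretrained("./my-bert")
    model = AutoModel.from_pretrained("./my-bert")
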
docs/source/preprocessing.rst (new file)
@ -0,0 +1,373 @@

Preprocessing data
==================

In this tutorial, we'll explore how to preprocess your data using 🤗 Transformers. The main tool for this is what we
call a :doc:`tokenizer <main_classes/tokenizer>`. You can build one using the tokenizer class associated to the model
you would like to use, or directly with the :class:`~transformers.AutoTokenizer` class.

As we saw in the :doc:`quicktour </quicktour>`, the tokenizer will first split a given text in words (or part of words,
punctuation symbols, etc.) usually called `tokens`. Then it will convert those `tokens` into numbers, to be able to
build a tensor out of them and feed them to the model. It will also add any additional inputs the model might expect to
work properly.

.. note::

    If you plan on using a pretrained model, it's important to use the associated pretrained tokenizer: it will split
    the text you give it in tokens the same way as for the pretraining corpus, and it will use the same correspondence
    token to index (that we usually call a `vocab`) as during pretraining.

To automatically download the vocab used during pretraining or fine-tuning a given model, you can use the
:func:`~transformers.AutoTokenizer.from_pretrained` method:

::

    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')

Base use
~~~~~~~~

A :class:`~transformers.PreTrainedTokenizer` has many methods, but the only one you need to remember for preprocessing
is its ``__call__``: you just need to feed your sentence to your tokenizer object.

::

    encoded_input = tokenizer("Hello, I'm a single sentence!")
    print(encoded_input)

This will return a dictionary string to list of ints like this one:

::

    {'input_ids': [101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102],
     'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
     'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}

The `input_ids <glossary.html#input-ids>`__ are the indices corresponding to each token in our sentence. We will see
below what the `attention_mask <glossary.html#attention-mask>`__ is used for and in
:ref:`the next section <sentence-pairs>` the goal of `token_type_ids <glossary.html#token-type-ids>`__.

The tokenizer can decode a list of token ids back into a proper sentence:

::

    tokenizer.decode(encoded_input["input_ids"])

which should return

::

    "[CLS] Hello, I'm a single sentence! [SEP]"

As you can see, the tokenizer automatically added some special tokens that the model expects. Not all models need
special tokens; for instance, if we had used ``gpt2-medium`` instead of ``bert-base-cased`` to create our tokenizer, we
would have seen the same sentence as the original one here. You can disable this behavior (which is only advised if you
have added those special tokens yourself) by passing ``add_special_tokens=False``.

If you have several sentences you want to process, you can do this efficiently by sending them as a list to the
tokenizer:

::

    batch_sentences = ["Hello I'm a single sentence",
                       "And another sentence",
                       "And the very very last one"]
    encoded_inputs = tokenizer(batch_sentences)
    print(encoded_inputs)

We get back a dictionary once again, this time with values being lists of lists of ints:

::

    {'input_ids': [[101, 8667, 146, 112, 182, 170, 1423, 5650, 102],
                   [101, 1262, 1330, 5650, 102],
                   [101, 1262, 1103, 1304, 1304, 1314, 1141, 102]],
     'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0]],
     'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1]]}

If the purpose of sending several sentences at a time to the tokenizer is to build a batch to feed the model, you will
probably want:

- To pad each sentence to the maximum length there is in your batch.
- To truncate each sentence to the maximum length the model can accept (if applicable).
- To return tensors.

You can do all of this by using the following options when feeding your list of sentences to the tokenizer:

::

    ## PYTORCH CODE
    batch = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt")
    print(batch)
    ## TENSORFLOW CODE
    batch = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf")
    print(batch)

which should now return a dictionary string to tensor like this:

::

    {'input_ids': tensor([[ 101, 8667,  146,  112,  182,  170, 1423, 5650,  102],
                          [ 101, 1262, 1330, 5650,  102,    0,    0,    0,    0],
                          [ 101, 1262, 1103, 1304, 1304, 1314, 1141,  102,    0]]),
     'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0, 0, 0]]),
     'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1],
                               [1, 1, 1, 1, 1, 0, 0, 0, 0],
                               [1, 1, 1, 1, 1, 1, 1, 1, 0]])}

We can now see what the `attention_mask <glossary.html#attention-mask>`__ is all about: it points out which tokens the
model should pay attention to and which ones it should not (because they represent padding in this case).

Note that if your model does not have a maximum length associated to it, the command above will throw a warning. You
can safely ignore it. You can also pass ``verbose=False`` to stop the tokenizer from throwing those kinds of warnings.

.. _sentence-pairs:

Preprocessing pairs of sentences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Sometimes you need to feed a pair of sentences to your model. For instance, if you want to classify if two sentences in
a pair are similar, or for question-answering models, which take a context and a question. For BERT models, the input
is then represented like this:

::

    [CLS] Sequence A [SEP] Sequence B [SEP]

You can encode a pair of sentences in the format expected by your model by supplying the two sentences as two arguments
(not a list, since a list of two sentences will be interpreted as a batch of two single sentences, as we saw before).

::

    encoded_input = tokenizer("How old are you?", "I'm 6 years old")
    print(encoded_input)

This will once again return a dict string to list of ints:

::

    {'input_ids': [101, 1731, 1385, 1132, 1128, 136, 102, 146, 112, 182, 127, 1201, 1385, 102],
     'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
     'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}

This shows us what the `token_type_ids <glossary.html#token-type-ids>`__ are for: they indicate to the model which part
of the inputs corresponds to the first sentence and which part corresponds to the second sentence. Note that
`token_type_ids` are not required or handled by all models. By default, a tokenizer will only return the inputs that
its associated model expects. You can force the return (or the non-return) of any of those special arguments by
using ``return_input_ids`` or ``return_token_type_ids``.

If we decode the token ids we obtained, we will see that the special tokens have been properly added.

::

    tokenizer.decode(encoded_input["input_ids"])

will return:

::

    "[CLS] How old are you? [SEP] I'm 6 years old [SEP]"

If you have a list of pairs of sequences you want to process, you should feed them as two lists to your tokenizer: the
list of first sentences and the list of second sentences:

::

    batch_sentences = ["Hello I'm a single sentence",
                       "And another sentence",
                       "And the very very last one"]
    batch_of_second_sentences = ["I'm a sentence that goes with the first sentence",
                                 "And I should be encoded with the second sentence",
                                 "And I go with the very last one"]
    encoded_inputs = tokenizer(batch_sentences, batch_of_second_sentences)
    print(encoded_inputs)

will return a dict with the values being lists of lists of ints:

::

    {'input_ids': [[101, 8667, 146, 112, 182, 170, 1423, 5650, 102, 146, 112, 182, 170, 5650, 1115, 2947, 1114, 1103, 1148, 5650, 102],
                   [101, 1262, 1330, 5650, 102, 1262, 146, 1431, 1129, 12544, 1114, 1103, 1248, 5650, 102],
                   [101, 1262, 1103, 1304, 1304, 1314, 1141, 102, 1262, 146, 1301, 1114, 1103, 1304, 1314, 1141, 102]],
     'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
     'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}

To double-check what is fed to the model, we can decode each list in `input_ids` one by one:

::

    for ids in encoded_inputs["input_ids"]:
        print(tokenizer.decode(ids))

which will return:

::

    [CLS] Hello I'm a single sentence [SEP] I'm a sentence that goes with the first sentence [SEP]
    [CLS] And another sentence [SEP] And I should be encoded with the second sentence [SEP]
    [CLS] And the very very last one [SEP] And I go with the very last one [SEP]

Once again, you can automatically pad your inputs to the maximum sentence length in the batch, truncate to the maximum
length the model can accept and return tensors directly with the following:

::

    ## PYTORCH CODE
    batch = tokenizer(batch_sentences, batch_of_second_sentences, padding=True, truncation=True, return_tensors="pt")
    ## TENSORFLOW CODE
    batch = tokenizer(batch_sentences, batch_of_second_sentences, padding=True, truncation=True, return_tensors="tf")

Everything you always wanted to know about padding and truncation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

We have seen the commands that will work for most cases (pad your batch to the length of the maximum sentence and
truncate to the maximum length the model can accept). However, the API supports more strategies if you need them. The
three arguments you need to know for this are :obj:`padding`, :obj:`truncation` and :obj:`max_length`.

- :obj:`padding` controls the padding. It can be a boolean or a string which should be:

  - :obj:`True` or :obj:`'longest'` to pad to the longest sequence in the batch (doing no padding if you only provide
    a single sequence).
  - :obj:`'max_length'` to pad to a length specified by the :obj:`max_length` argument or the maximum length accepted
    by the model if no :obj:`max_length` is provided (``max_length=None``). If you only provide a single sequence,
    padding will still be applied to it.
  - :obj:`False` or :obj:`'do_not_pad'` to not pad the sequences. As we have seen before, this is the default
    behavior.

- :obj:`truncation` controls the truncation. It can be a boolean or a string which should be:

  - :obj:`True` or :obj:`'only_first'` to truncate to a maximum length specified by the :obj:`max_length` argument or
    the maximum length accepted by the model if no :obj:`max_length` is provided (``max_length=None``). This will
    only truncate the first sentence of a pair if a pair of sequences (or a batch of pairs of sequences) is provided.
  - :obj:`'only_second'` to truncate to a maximum length specified by the :obj:`max_length` argument or the maximum
    length accepted by the model if no :obj:`max_length` is provided (``max_length=None``). This will only truncate
    the second sentence of a pair if a pair of sequences (or a batch of pairs of sequences) is provided.
  - :obj:`'longest_first'` to truncate to a maximum length specified by the :obj:`max_length` argument or the maximum
    length accepted by the model if no :obj:`max_length` is provided (``max_length=None``). This will truncate token
    by token, removing a token from the longest sequence in the pair until the proper length is reached.
  - :obj:`False` or :obj:`'do_not_truncate'` to not truncate the sequences. As we have seen before, this is the
    default behavior.

- :obj:`max_length` controls the length of the padding/truncation. It can be an integer or :obj:`None`, in which case
  it will default to the maximum length the model can accept. If the model has no specific maximum input length,
  truncation/padding to :obj:`max_length` is deactivated.

Here is a table summarizing the recommended way to set up padding and truncation. If you use pairs of input sequences
in any of the following examples, you can replace :obj:`truncation=True` by a :obj:`STRATEGY` selected in
:obj:`['only_first', 'only_second', 'longest_first']`, i.e. :obj:`truncation='only_second'` or
:obj:`truncation='longest_first'` to control how both sequences in the pair are truncated as detailed before.

+--------------------------------------+-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| Truncation | Padding | Instruction |
|
||||
+======================================+===================================+=============================================================================================+
|
||||
| no truncation | no padding | :obj:`tokenizer(batch_sentences)` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to max sequence in batch | :obj:`tokenizer(batch_sentences, padding=True)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, padding='longest')` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to max model input length | :obj:`tokenizer(batch_sentences, padding='max_length')` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to specific length | :obj:`tokenizer(batch_sentences, padding='max_length', max_length=42)` |
|
||||
+--------------------------------------+-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| truncation to max model input length | no padding | :obj:`tokenizer(batch_sentences, truncation=True)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, truncation=STRATEGY)` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to max sequence in batch | :obj:`tokenizer(batch_sentences, padding=True, truncation=True)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, padding=True, truncation=STRATEGY)` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to max model input length | :obj:`tokenizer(batch_sentences, padding='max_length', truncation=True)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to specific length | Not possible |
|
||||
+--------------------------------------+-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| truncation to specific length | no padding | :obj:`tokenizer(batch_sentences, truncation=True, max_length=42)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to max sequence in batch | :obj:`tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)` |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to max model input length | Not possible |
|
||||
| +-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
| | padding to specific length | :obj:`tokenizer(batch_sentences, padding='max_length', truncation=True, max_length=42)` or |
|
||||
| | | :obj:`tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY, max_length=42)` |
|
||||
+--------------------------------------+-----------------------------------+---------------------------------------------------------------------------------------------+
|
||||
|
||||
Pre-tokenized inputs
~~~~~~~~~~~~~~~~~~~~

The tokenizer also accepts pre-tokenized inputs. This is particularly useful when you want to compute labels and
extract predictions in `named entity recognition (NER) <https://en.wikipedia.org/wiki/Named-entity_recognition>`__ or
`part-of-speech tagging (POS tagging) <https://en.wikipedia.org/wiki/Part-of-speech_tagging>`__.

If you want to use pre-tokenized inputs, just set :obj:`is_pretokenized=True` when passing your inputs to the
tokenizer. For instance:

::

    encoded_input = tokenizer(["Hello", "I'm", "a", "single", "sentence"], is_pretokenized=True)
    print(encoded_input)

will return:

::

    {'input_ids': [101, 8667, 146, 112, 182, 170, 1423, 5650, 102],
     'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0],
     'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1]}

Note that the tokenizer still adds the ids of special tokens (if applicable) unless you pass
``add_special_tokens=False``.

This works exactly as before for batches of sentences or batches of pairs of sentences. You can encode a batch of
sentences like this:

::

    batch_sentences = [["Hello", "I'm", "a", "single", "sentence"],
                       ["And", "another", "sentence"],
                       ["And", "the", "very", "very", "last", "one"]]
    encoded_inputs = tokenizer(batch_sentences, is_pretokenized=True)

or a batch of pairs of sentences like this:

::

    batch_of_second_sentences = [["I'm", "a", "sentence", "that", "goes", "with", "the", "first", "sentence"],
                                 ["And", "I", "should", "be", "encoded", "with", "the", "second", "sentence"],
                                 ["And", "I", "go", "with", "the", "very", "last", "one"]]
    encoded_inputs = tokenizer(batch_sentences, batch_of_second_sentences, is_pretokenized=True)

And you can add padding and truncation as well as directly return tensors like before:

::

    ## PYTORCH CODE
    batch = tokenizer(batch_sentences,
                      batch_of_second_sentences,
                      is_pretokenized=True,
                      padding=True,
                      truncation=True,
                      return_tensors="pt")
    ## TENSORFLOW CODE
    batch = tokenizer(batch_sentences,
                      batch_of_second_sentences,
                      is_pretokenized=True,
                      padding=True,
                      truncation=True,
                      return_tensors="tf")

@ -3,6 +3,7 @@ Pretrained models
|
||||
|
||||
Here is the full list of the currently provided pretrained models together with a short presentation of each model.
|
||||
|
||||
For a list that includes community-uploaded models, refer to `https://huggingface.co/models <https://huggingface.co/models>`__.
|
||||
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| Architecture | Shortcut name | Details of the model |
|
||||
@ -21,10 +22,12 @@ Here is the full list of the currently provided pretrained models together with
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-multilingual-uncased`` | | (Original, not recommended) 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on lower-cased text in the top 102 languages with the largest Wikipedias |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/bert/blob/master/multilingual.md>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-multilingual-cased`` | | (New, **recommended**) 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on cased text in the top 104 languages with the largest Wikipedias |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/bert/blob/master/multilingual.md>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-chinese`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
@ -32,27 +35,80 @@ Here is the full list of the currently provided pretrained models together with
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-german-cased`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on cased German text by Deepset.ai |
|
||||
| | | |
|
||||
| | | (see `details on deepset.ai website <https://deepset.ai/german-bert>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-large-uncased-whole-word-masking`` | | 24-layer, 1024-hidden, 16-heads, 340M parameters. |
|
||||
| | | | Trained on lower-cased English text using Whole-Word-Masking |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/bert/#bert>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-large-cased-whole-word-masking`` | | 24-layer, 1024-hidden, 16-heads, 340M parameters. |
|
||||
| | | | Trained on cased English text using Whole-Word-Masking |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/bert/#bert>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-large-uncased-whole-word-masking-finetuned-squad`` | | 24-layer, 1024-hidden, 16-heads, 340M parameters. |
|
||||
| | | | The ``bert-large-uncased-whole-word-masking`` model fine-tuned on SQuAD |
|
||||
| | | |
|
||||
| | | (see details of fine-tuning in the `example section <https://github.com/huggingface/transformers/tree/master/examples>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-large-cased-whole-word-masking-finetuned-squad`` | | 24-layer, 1024-hidden, 16-heads, 340M parameters |
|
||||
| | | | The ``bert-large-cased-whole-word-masking`` model fine-tuned on SQuAD |
|
||||
| | | |
|
||||
| | | (see `details of fine-tuning in the example section <https://huggingface.co/transformers/examples.html>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-cased-finetuned-mrpc`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | The ``bert-base-cased`` model fine-tuned on MRPC |
|
||||
| | | |
|
||||
| | | (see `details of fine-tuning in the example section <https://huggingface.co/transformers/examples.html>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-german-dbmdz-cased`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on cased German text by DBMDZ |
|
||||
| | | |
|
||||
| | | (see `details on dbmdz repository <https://github.com/dbmdz/german-bert>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``bert-base-german-dbmdz-uncased`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on uncased German text by DBMDZ |
|
||||
| | | |
|
||||
| | | (see `details on dbmdz repository <https://github.com/dbmdz/german-bert>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``cl-tohoku/bert-base-japanese`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on Japanese text. Text is tokenized with MeCab and WordPiece. |
|
||||
| | | | `MeCab <https://taku910.github.io/mecab/>`__ is required for tokenization. |
|
||||
| | | |
|
||||
| | | (see `details on cl-tohoku repository <https://github.com/cl-tohoku/bert-japanese>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``cl-tohoku/bert-base-japanese-whole-word-masking`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on Japanese text using Whole-Word-Masking. Text is tokenized with MeCab and WordPiece. |
|
||||
| | | | `MeCab <https://taku910.github.io/mecab/>`__ is required for tokenization. |
|
||||
| | | |
|
||||
| | | (see `details on cl-tohoku repository <https://github.com/cl-tohoku/bert-japanese>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``cl-tohoku/bert-base-japanese-char`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on Japanese text. Text is tokenized into characters. |
|
||||
| | | |
|
||||
| | | (see `details on cl-tohoku repository <https://github.com/cl-tohoku/bert-japanese>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``cl-tohoku/bert-base-japanese-char-whole-word-masking`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on Japanese text using Whole-Word-Masking. Text is tokenized into characters. |
|
||||
| | | |
|
||||
| | | (see `details on cl-tohoku repository <https://github.com/cl-tohoku/bert-japanese>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``TurkuNLP/bert-base-finnish-cased-v1`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on cased Finnish text. |
|
||||
| | | |
|
||||
| | | (see `details on turkunlp.org <http://turkunlp.org/FinBERT/>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``TurkuNLP/bert-base-finnish-uncased-v1`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on uncased Finnish text. |
|
||||
| | | |
|
||||
| | | (see `details on turkunlp.org <http://turkunlp.org/FinBERT/>`__). |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``wietsedv/bert-base-dutch-cased`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | Trained on cased Dutch text. |
|
||||
| | | |
|
||||
| | | (see `details on wietsedv repository <https://github.com/wietsedv/bertje/>`__). |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| GPT | ``openai-gpt`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
|
||||
| | | | OpenAI GPT English model |
|
||||
@ -65,6 +121,9 @@ Here is the full list of the currently provided pretrained models together with
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``gpt2-large`` | | 36-layer, 1280-hidden, 20-heads, 774M parameters. |
|
||||
| | | | OpenAI's Large-sized GPT-2 English model |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``gpt2-xl`` | | 48-layer, 1600-hidden, 25-heads, 1558M parameters. |
|
||||
| | | | OpenAI's XL-sized GPT-2 English model |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| Transformer-XL | ``transfo-xl-wt103`` | | 18-layer, 1024-hidden, 16-heads, 257M parameters. |
|
||||
| | | | English model trained on wikitext-103 |
|
||||
@ -98,26 +157,203 @@ Here is the full list of the currently provided pretrained models together with
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``xlm-clm-ende-1024`` | | 6-layer, 1024-hidden, 8-heads |
|
||||
| | | | XLM English-German model trained with CLM (Causal Language Modeling) on the concatenation of English and German wikipedia |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``xlm-mlm-17-1280`` | | 16-layer, 1280-hidden, 16-heads |
|
||||
| | | | XLM model trained with MLM (Masked Language Modeling) on 17 languages. |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``xlm-mlm-100-1280`` | | 16-layer, 1280-hidden, 16-heads |
|
||||
| | | | XLM model trained with MLM (Masked Language Modeling) on 100 languages. |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| RoBERTa | ``roberta-base`` | | 12-layer, 768-hidden, 12-heads, 125M parameters |
|
||||
| | | | RoBERTa using the BERT-base architecture |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``roberta-large`` | | 24-layer, 1024-hidden, 16-heads, 355M parameters |
|
||||
| | | | RoBERTa using the BERT-large architecture |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``roberta-large-mnli`` | | 24-layer, 1024-hidden, 16-heads, 355M parameters |
|
||||
| | | | ``roberta-large`` fine-tuned on `MNLI <http://www.nyu.edu/projects/bowman/multinli/>`__. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilroberta-base`` | | 6-layer, 768-hidden, 12-heads, 82M parameters |
|
||||
| | | | The DistilRoBERTa model distilled from the RoBERTa model `roberta-base` checkpoint. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``roberta-base-openai-detector`` | | 12-layer, 768-hidden, 12-heads, 125M parameters |
|
||||
| | | | ``roberta-base`` fine-tuned by OpenAI on the outputs of the 1.5B-parameter GPT-2 model. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/openai/gpt-2-output-dataset/tree/master/detector>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``roberta-large-openai-detector`` | | 24-layer, 1024-hidden, 16-heads, 355M parameters |
|
||||
| | | | ``roberta-large`` fine-tuned by OpenAI on the outputs of the 1.5B-parameter GPT-2 model. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/openai/gpt-2-output-dataset/tree/master/detector>`__) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| DistilBERT | ``distilbert-base-uncased`` | | 6-layer, 768-hidden, 12-heads, 66M parameters |
|
||||
| | | | The DistilBERT model distilled from the BERT model `bert-base-uncased` checkpoint |
|
||||
| | | (see `details <https://medium.com/huggingface/distilbert-8cf3380435b5>`__) |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilbert-base-uncased-distilled-squad`` | | 6-layer, 768-hidden, 12-heads, 66M parameters |
|
||||
| | | | The DistilBERT model distilled from the BERT model `bert-base-uncased` checkpoint, with an additional linear layer. |
|
||||
| | | (see `details <https://medium.com/huggingface/distilbert-8cf3380435b5>`__) |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilbert-base-cased`` | | 6-layer, 768-hidden, 12-heads, 65M parameters |
|
||||
| | | | The DistilBERT model distilled from the BERT model `bert-base-cased` checkpoint |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilbert-base-cased-distilled-squad`` | | 6-layer, 768-hidden, 12-heads, 65M parameters |
|
||||
| | | | The DistilBERT model distilled from the BERT model `bert-base-cased` checkpoint, with an additional question answering layer. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilgpt2`` | | 6-layer, 768-hidden, 12-heads, 82M parameters |
|
||||
| | | | The DistilGPT2 model distilled from the GPT2 model `gpt2` checkpoint. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilbert-base-german-cased`` | | 6-layer, 768-hidden, 12-heads, 66M parameters |
|
||||
| | | | The German DistilBERT model distilled from the German DBMDZ BERT model `bert-base-german-dbmdz-cased` checkpoint. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``distilbert-base-multilingual-cased`` | | 6-layer, 768-hidden, 12-heads, 134M parameters |
|
||||
| | | | The multilingual DistilBERT model distilled from the Multilingual BERT model `bert-base-multilingual-cased` checkpoint. |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| CTRL | ``ctrl`` | | 48-layer, 1280-hidden, 16-heads, 1.6B parameters |
|
||||
| | | | Salesforce's Large-sized CTRL English model |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| CamemBERT | ``camembert-base`` | | 12-layer, 768-hidden, 12-heads, 110M parameters |
|
||||
| | | | CamemBERT using the BERT-base architecture |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/pytorch/fairseq/tree/master/examples/camembert>`__) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| ALBERT | ``albert-base-v1`` | | 12 repeating layers, 128 embedding, 768-hidden, 12-heads, 11M parameters |
|
||||
| | | | ALBERT base model |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-large-v1`` | | 24 repeating layers, 128 embedding, 1024-hidden, 16-heads, 17M parameters |
|
||||
| | | | ALBERT large model |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-xlarge-v1`` | | 24 repeating layers, 128 embedding, 2048-hidden, 16-heads, 58M parameters |
|
||||
| | | | ALBERT xlarge model |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-xxlarge-v1`` | | 12 repeating layer, 128 embedding, 4096-hidden, 64-heads, 223M parameters |
|
||||
| | | | ALBERT xxlarge model |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-base-v2`` | | 12 repeating layers, 128 embedding, 768-hidden, 12-heads, 11M parameters |
|
||||
| | | | ALBERT base model with no dropout, additional training data and longer training |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-large-v2`` | | 24 repeating layers, 128 embedding, 1024-hidden, 16-heads, 17M parameters |
|
||||
| | | | ALBERT large model with no dropout, additional training data and longer training |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-xlarge-v2`` | | 24 repeating layers, 128 embedding, 2048-hidden, 16-heads, 58M parameters |
|
||||
| | | | ALBERT xlarge model with no dropout, additional training data and longer training |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``albert-xxlarge-v2`` | | 12 repeating layer, 128 embedding, 4096-hidden, 64-heads, 223M parameters |
|
||||
| | | | ALBERT xxlarge model with no dropout, additional training data and longer training |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/google-research/ALBERT>`__) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| T5 | ``t5-small`` | | ~60M parameters with 6-layers, 512-hidden-state, 2048 feed-forward hidden-state, 8-heads, |
|
||||
| | | | Trained on English text: the Colossal Clean Crawled Corpus (C4) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``t5-base`` | | ~220M parameters with 12-layers, 768-hidden-state, 3072 feed-forward hidden-state, 12-heads, |
|
||||
| | | | Trained on English text: the Colossal Clean Crawled Corpus (C4) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``t5-large`` | | ~770M parameters with 24-layers, 1024-hidden-state, 4096 feed-forward hidden-state, 16-heads, |
|
||||
| | | | Trained on English text: the Colossal Clean Crawled Corpus (C4) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``t5-3B`` | | ~2.8B parameters with 24-layers, 1024-hidden-state, 16384 feed-forward hidden-state, 32-heads, |
|
||||
| | | | Trained on English text: the Colossal Clean Crawled Corpus (C4) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``t5-11B`` | | ~11B parameters with 24-layers, 1024-hidden-state, 65536 feed-forward hidden-state, 128-heads, |
|
||||
| | | | Trained on English text: the Colossal Clean Crawled Corpus (C4) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| XLM-RoBERTa | ``xlm-roberta-base`` | | ~125M parameters with 12-layers, 768-hidden-state, 3072 feed-forward hidden-state, 8-heads, |
|
||||
| | | | Trained on 2.5 TB of newly created clean CommonCrawl data in 100 languages |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``xlm-roberta-large`` | | ~355M parameters with 24-layers, 1024-hidden-state, 4096 feed-forward hidden-state, 16-heads, |
|
||||
| | | | Trained on 2.5 TB of newly created clean CommonCrawl data in 100 languages |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| FlauBERT | ``flaubert/flaubert_small_cased`` | | 6-layer, 512-hidden, 8-heads, 54M parameters |
|
||||
| | | | FlauBERT small architecture |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/getalp/Flaubert>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``flaubert/flaubert_base_uncased`` | | 12-layer, 768-hidden, 12-heads, 137M parameters |
|
||||
| | | | FlauBERT base architecture with uncased vocabulary |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/getalp/Flaubert>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``flaubert/flaubert_base_cased`` | | 12-layer, 768-hidden, 12-heads, 138M parameters |
|
||||
| | | | FlauBERT base architecture with cased vocabulary |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/getalp/Flaubert>`__) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``flaubert/flaubert_large_cased`` | | 24-layer, 1024-hidden, 16-heads, 373M parameters |
|
||||
| | | | FlauBERT large architecture |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/getalp/Flaubert>`__) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| Bart | ``facebook/bart-large`` | | 24-layer, 1024-hidden, 16-heads, 406M parameters |
|
||||
| | | |
|
||||
| | | (see `details <https://github.com/pytorch/fairseq/tree/master/examples/bart>`_) |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``facebook/bart-base`` | | 12-layer, 768-hidden, 16-heads, 139M parameters |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``facebook/bart-large-mnli`` | | Adds a 2-layer classification head with 1 million parameters |
|
||||
| | | | bart-large base architecture with a classification head, finetuned on MNLI |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``facebook/bart-large-cnn`` | | 12-layer, 1024-hidden, 16-heads, 406M parameters (same as base) |
|
||||
| | | | bart-large base architecture finetuned on the CNN/Daily Mail summarization task |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``facebook/mbart-large-en-ro`` | | 12-layer, 1024-hidden, 16-heads, 880M parameters |
|
||||
| | | | bart-large architecture pretrained on cc25 multilingual data, finetuned on WMT English-Romanian translation. |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| DialoGPT | ``DialoGPT-small`` | | 12-layer, 768-hidden, 12-heads, 124M parameters |
|
||||
| | | | Trained on English text: 147M conversation-like exchanges extracted from Reddit. |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``DialoGPT-medium`` | | 24-layer, 1024-hidden, 16-heads, 355M parameters |
|
||||
| | | | Trained on English text: 147M conversation-like exchanges extracted from Reddit. |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``DialoGPT-large`` | | 36-layer, 1280-hidden, 20-heads, 774M parameters |
|
||||
| | | | Trained on English text: 147M conversation-like exchanges extracted from Reddit. |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| Reformer | ``reformer-enwik8`` | | 12-layer, 1024-hidden, 8-heads, 149M parameters |
|
||||
| | | | Trained on English Wikipedia data - enwik8. |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``reformer-crime-and-punishment`` | | 6-layer, 256-hidden, 2-heads, 3M parameters |
|
||||
| | | | Trained on English text: Crime and Punishment novel by Fyodor Dostoyevsky. |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| MarianMT | ``Helsinki-NLP/opus-mt-{src}-{tgt}`` | | 12-layer, 512-hidden, 8-heads, ~74M parameter machine translation models. Parameter counts vary depending on vocabulary size. |
|
||||
| | | | (see `model list <https://huggingface.co/Helsinki-NLP>`_) |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| Longformer | ``allenai/longformer-base-4096`` | | 12-layer, 768-hidden, 12-heads, ~149M parameters |
|
||||
| | | | Starting from RoBERTa-base checkpoint, trained on documents of max length 4,096 |
|
||||
| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | ``allenai/longformer-large-4096`` | | 24-layer, 1024-hidden, 16-heads, ~435M parameters |
|
||||
| | | | Starting from RoBERTa-large checkpoint, trained on documents of max length 4,096 |
|
||||
+-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+
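Any of the shortcut names in this table can be passed directly to ``from_pretrained()``. As a minimal, illustrative sketch (the checkpoint picked here is arbitrary):

.. code-block:: python

    from transformers import AutoTokenizer, AutoModel

    # Download (or load from cache) one of the checkpoints listed above
    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    model = AutoModel.from_pretrained("distilbert-base-uncased")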
|
||||
|
||||
.. <https://huggingface.co/transformers/examples.html>`__
|
||||
@ -1,190 +0,0 @@
|
||||
# Quickstart
|
||||
|
||||
## Philosophy
|
||||
|
||||
Transformers is an opinionated library built for NLP researchers seeking to use/study/extend large-scale transformer models.
|
||||
|
||||
The library was designed with two strong goals in mind:
|
||||
|
||||
- be as easy and fast to use as possible:
|
||||
|
||||
- we strongly limited the number of user-facing abstractions to learn; in fact, there are almost no abstractions, just three standard classes required to use each model: configuration, model and tokenizer,
|
||||
- all of these classes can be initialized in a simple and unified way from pretrained instances by using a common `from_pretrained()` instantiation method which will take care of downloading (if needed), caching and loading the related class from a pretrained instance supplied in the library or your own saved instance.
|
||||
- as a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to extend/build upon the library, just use regular Python/PyTorch modules and inherit from the base classes of the library to reuse functionalities like model loading/saving.
|
||||
|
||||
- provide state-of-the-art models with performance as close as possible to the original models:
|
||||
|
||||
- we provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture,
|
||||
- the code is usually as close to the original code base as possible, which means some PyTorch code may not be as *pytorchic* as it could be as a result of being converted from TensorFlow code.
|
||||
|
||||
A few other goals:
|
||||
|
||||
- expose the models' internals as consistently as possible:
|
||||
|
||||
- we give access, using a single API, to the full hidden-states and attention weights,
|
||||
- the tokenizer and base model APIs are standardized to easily switch between models.
|
||||
|
||||
- incorporate a subjective selection of promising tools for fine-tuning/investigating these models:
|
||||
|
||||
- a simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning,
|
||||
- simple ways to mask and prune transformer heads.
|
||||
|
||||
## Main concepts
|
||||
|
||||
The library is built around three types of classes for each model:
|
||||
|
||||
- **model classes** which are PyTorch models (`torch.nn.Module`) of the 6 model architectures currently provided in the library, e.g. `BertModel`
|
||||
- **configuration classes** which store all the parameters required to build a model, e.g. `BertConfig`. You don't always need to instantiate these yourself; in particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model)
|
||||
- **tokenizer classes** which store the vocabulary for each model and provide methods for encoding/decoding strings into lists of token indices to be fed to a model, e.g. `BertTokenizer`
|
||||
|
||||
All these classes can be instantiated from pretrained instances and saved locally using two methods:
|
||||
|
||||
- `from_pretrained()` lets you instantiate a model/configuration/tokenizer from a pretrained version either provided by the library itself (currently 27 models are provided as listed [here](https://huggingface.co/transformers/pretrained_models.html)) or stored locally (or on a server) by the user,
|
||||
- `save_pretrained()` lets you save a model/configuration/tokenizer locally so that it can be reloaded using `from_pretrained()`, as shown in the short sketch below.
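For illustration, here is a minimal sketch of that round trip (the directory name is just a placeholder):

```python
from transformers import BertModel, BertTokenizer

# Download (or load from cache) a pretrained model and its tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

# Save both to a local directory (placeholder path)
tokenizer.save_pretrained('./my-bert-checkpoint')
model.save_pretrained('./my-bert-checkpoint')

# Reload them later from that directory
tokenizer = BertTokenizer.from_pretrained('./my-bert-checkpoint')
model = BertModel.from_pretrained('./my-bert-checkpoint')
```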
|
||||
|
||||
We'll finish this quickstart by going through a few simple examples to see how we can instantiate and use these classes. The rest of the documentation is organized in two parts:
|
||||
|
||||
- the **MAIN CLASSES** section details the common functionalities/methods/attributes of the three main types of classes (configuration, model, tokenizer) plus some optimization-related classes provided as utilities for training,
|
||||
- the **PACKAGE REFERENCE** section details all the variants of each class for each model architecture, and in particular the inputs/outputs that you should expect when calling each of them.
|
||||
|
||||
## Quick tour: Usage
|
||||
|
||||
Here are two examples showcasing a few `Bert` and `GPT2` classes and pre-trained models.
|
||||
|
||||
See the full API reference for examples for each model class.
|
||||
|
||||
### BERT example
|
||||
|
||||
Let's start by preparing a tokenized input (a list of token indices to be fed to BERT) from a text string using `BertTokenizer`
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import BertTokenizer, BertModel, BertForMaskedLM
|
||||
|
||||
# OPTIONAL: if you want to have more information on what's happening under the hood, activate the logger as follows
|
||||
import logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Load pre-trained model tokenizer (vocabulary)
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
||||
|
||||
# Tokenize input
|
||||
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
|
||||
tokenized_text = tokenizer.tokenize(text)
|
||||
|
||||
# Mask a token that we will try to predict back with `BertForMaskedLM`
|
||||
masked_index = 8
|
||||
tokenized_text[masked_index] = '[MASK]'
|
||||
assert tokenized_text == ['[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]', 'jim', '[MASK]', 'was', 'a', 'puppet', '##eer', '[SEP]']
|
||||
|
||||
# Convert token to vocabulary indices
|
||||
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
|
||||
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
|
||||
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
|
||||
|
||||
# Convert inputs to PyTorch tensors
|
||||
tokens_tensor = torch.tensor([indexed_tokens])
|
||||
segments_tensors = torch.tensor([segments_ids])
|
||||
```
|
||||
|
||||
Let's see how we can use `BertModel` to encode our inputs in hidden-states:
|
||||
|
||||
```python
|
||||
# Load pre-trained model (weights)
|
||||
model = BertModel.from_pretrained('bert-base-uncased')
|
||||
|
||||
# Set the model in evaluation mode to deactivate the dropout modules
|
||||
# This is IMPORTANT to have reproducible results during evaluation!
|
||||
model.eval()
|
||||
|
||||
# If you have a GPU, put everything on cuda
|
||||
tokens_tensor = tokens_tensor.to('cuda')
|
||||
segments_tensors = segments_tensors.to('cuda')
|
||||
model.to('cuda')
|
||||
|
||||
# Predict hidden states features for each layer
|
||||
with torch.no_grad():
|
||||
# See the models docstrings for the detail of the inputs
|
||||
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
|
||||
# Transformers models always output tuples.
|
||||
# See the models docstrings for the detail of all the outputs
|
||||
# In our case, the first element is the hidden state of the last layer of the Bert model
|
||||
encoded_layers = outputs[0]
|
||||
# We have encoded our input sequence in a FloatTensor of shape (batch size, sequence length, model hidden dimension)
|
||||
assert tuple(encoded_layers.shape) == (1, len(indexed_tokens), model.config.hidden_size)
|
||||
```
|
||||
|
||||
And how to use `BertForMaskedLM` to predict a masked token:
|
||||
|
||||
```python
|
||||
# Load pre-trained model (weights)
|
||||
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
|
||||
model.eval()
|
||||
|
||||
# If you have a GPU, put everything on cuda
|
||||
tokens_tensor = tokens_tensor.to('cuda')
|
||||
segments_tensors = segments_tensors.to('cuda')
|
||||
model.to('cuda')
|
||||
|
||||
# Predict all tokens
|
||||
with torch.no_grad():
|
||||
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
|
||||
predictions = outputs[0]
|
||||
|
||||
# confirm we were able to predict 'henson'
|
||||
predicted_index = torch.argmax(predictions[0, masked_index]).item()
|
||||
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
|
||||
assert predicted_token == 'henson'
|
||||
```
|
||||
|
||||
### OpenAI GPT-2
|
||||
|
||||
Here is a quick-start example using the `GPT2Tokenizer` and `GPT2LMHeadModel` classes with OpenAI's pre-trained model to predict the next token from a text prompt.
|
||||
|
||||
First let's prepare a tokenized input from our text string using `GPT2Tokenizer`
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import GPT2Tokenizer, GPT2LMHeadModel
|
||||
|
||||
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
|
||||
import logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Load pre-trained model tokenizer (vocabulary)
|
||||
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
|
||||
|
||||
# Encode a text input
|
||||
text = "Who was Jim Henson ? Jim Henson was a"
|
||||
indexed_tokens = tokenizer.encode(text)
|
||||
|
||||
# Convert indexed tokens to a PyTorch tensor
|
||||
tokens_tensor = torch.tensor([indexed_tokens])
|
||||
```
|
||||
|
||||
Let's see how to use `GPT2LMHeadModel` to generate the next token following our text:
|
||||
|
||||
```python
|
||||
# Load pre-trained model (weights)
|
||||
model = GPT2LMHeadModel.from_pretrained('gpt2')
|
||||
|
||||
# Set the model in evaluation mode to deactivate the dropout modules
|
||||
# This is IMPORTANT to have reproducible results during evaluation!
|
||||
model.eval()
|
||||
|
||||
# If you have a GPU, put everything on cuda
|
||||
tokens_tensor = tokens_tensor.to('cuda')
|
||||
model.to('cuda')
|
||||
|
||||
# Predict all tokens
|
||||
with torch.no_grad():
|
||||
outputs = model(tokens_tensor)
|
||||
predictions = outputs[0]
|
||||
|
||||
# get the predicted next sub-word (in our case, the word 'man')
|
||||
predicted_index = torch.argmax(predictions[0, -1, :]).item()
|
||||
predicted_text = tokenizer.decode(indexed_tokens + [predicted_index])
|
||||
assert predicted_text == 'Who was Jim Henson? Jim Henson was a man'
|
||||
```
|
||||
|
||||
Examples for each model class of each model architecture (Bert, GPT, GPT-2, Transformer-XL, XLNet and XLM) can be found in the [documentation](#documentation).
|
||||
393
docs/source/quicktour.rst
Normal file
@ -0,0 +1,393 @@
|
||||
Quick tour
|
||||
==========
|
||||
|
||||
Let's have a quick look at the 🤗 Transformers library features. The library downloads pretrained models for
|
||||
Natural Language Understanding (NLU) tasks, such as analyzing the sentiment of a text, and Natural Language Generation (NLG),
|
||||
such as completing a prompt with new text or translating into another language.
|
||||
|
||||
First we will see how to easily leverage the pipeline API to quickly use those pretrained models at inference. Then, we
|
||||
will dig a little bit more and see how the library gives you access to those models and helps you preprocess your data.
|
||||
|
||||
.. note::
|
||||
|
||||
All code examples presented in the documentation have a switch on the top left for PyTorch versus TensorFlow. If
|
||||
a given example has no switch, the code is expected to work for both backends without any change needed.
|
||||
|
||||
Getting started on a task with a pipeline
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The easiest way to use a pretrained model on a given task is to use :func:`~transformers.pipeline`. 🤗 Transformers
|
||||
provides the following tasks out of the box:
|
||||
|
||||
- Sentiment analysis: is a text positive or negative?
|
||||
- Text generation (in English): provide a prompt and the model will generate what follows.
|
||||
- Named entity recognition (NER): in an input sentence, label each word with the entity it represents (person, place,
|
||||
etc.)
|
||||
- Question answering: provide the model with some context and a question, extract the answer from the context.
|
||||
- Filling masked text: given a text with masked words (e.g., replaced by ``[MASK]``), fill the blanks.
|
||||
- Summarization: generate a summary of a long text.
|
||||
- Translation: translate a text into another language.
|
||||
- Feature extraction: return a tensor representation of the text.
|
||||
|
||||
Let's see how this works for sentiment analysis (the other tasks are all covered in the
|
||||
:doc:`task summary </task_summary>`):
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
>>> classifier = pipeline('sentiment-analysis')
|
||||
|
||||
When typing this command for the first time, a pretrained model and its tokenizer are downloaded and cached. We will
|
||||
look at both later on, but as an introduction the tokenizer's job is to preprocess the text for the model, which is
|
||||
then responsible for making predictions. The pipeline groups all of that together and post-processes the predictions to
|
||||
make them readable. For instance:
|
||||
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> classifier('We are very happy to show you the 🤗 Transformers library.')
|
||||
[{'label': 'POSITIVE', 'score': 0.9997795224189758}]
|
||||
|
||||
That's encouraging! You can use it on a list of sentences, which will be preprocessed then fed to the model as a
|
||||
`batch`, returning a list of dictionaries like this one:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.",
|
||||
... "We hope you don't hate it."])
|
||||
>>> for result in results:
|
||||
... print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
|
||||
label: POSITIVE, with score: 0.9998
|
||||
label: NEGATIVE, with score: 0.5309
|
||||
|
||||
You can see the second sentence has been classified as negative (it needs to be positive or negative) but its score is
|
||||
fairly neutral.
|
||||
|
||||
By default, the model downloaded for this pipeline is called "distilbert-base-uncased-finetuned-sst-2-english". We can
|
||||
look at its `model page <https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english>`__ to get more
|
||||
information about it. It uses the :doc:`DistilBERT architecture </model_doc/distilbert>` and has been fine-tuned on a
|
||||
dataset called SST-2 for the sentiment analysis task.
|
||||
|
||||
Let's say we want to use another model; for instance, one that has been trained on French data. We can search through
|
||||
the `model hub <https://huggingface.co/models>`__ that gathers models pretrained on a lot of data by research labs, but
|
||||
also community models (usually fine-tuned versions of those big models on a specific dataset). Applying the tags
|
||||
"French" and "text-classification" gives back a suggestion "nlptown/bert-base-multilingual-uncased-sentiment". Let's
|
||||
see how we can use it.
|
||||
|
||||
You can directly pass the name of the model to use to :func:`~transformers.pipeline`:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> classifier = pipeline('sentiment-analysis', model="nlptown/bert-base-multilingual-uncased-sentiment")
|
||||
|
||||
This classifier can now deal with texts in English, French, but also Dutch, German, Italian and Spanish! You can also
|
||||
replace that name with a local folder where you have saved a pretrained model (see below). You can also pass a model
|
||||
object and its associated tokenizer.
|
||||
|
||||
We will need two classes for this. The first is :class:`~transformers.AutoTokenizer`, which we will use to download the
|
||||
tokenizer associated to the model we picked and instantiate it. The second is
|
||||
:class:`~transformers.AutoModelForSequenceClassification` (or
|
||||
:class:`~transformers.TFAutoModelForSequenceClassification` if you are using TensorFlow), which we will use to download
|
||||
the model itself. Note that if we were using the library on another task, the class of the model would change. The
|
||||
:doc:`task summary </task_summary>` tutorial summarizes which class is used for which task.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
||||
|
||||
Now, to download the model and tokenizer we found previously, we just have to use the
|
||||
:func:`~transformers.AutoModelForSequenceClassification.from_pretrained` method (feel free to replace ``model_name`` by
|
||||
any other model from the model hub):
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
>>> classifier = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
|
||||
>>> # This model only exists in PyTorch, so we use the `from_pt` flag to import that model in TensorFlow.
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
>>> classifier = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
|
||||
|
||||
If you don't find a model that has been pretrained on some data similar to yours, you will need to fine-tune a
|
||||
pretrained model on your data. We provide :doc:`example scripts </examples>` to do so. Once you're done, don't forget
|
||||
to share your fine-tuned model on the hub with the community, using :doc:`this tutorial </model_sharing>`.
|
||||
|
||||
.. _pretrained-model:
|
||||
|
||||
Under the hood: pretrained models
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Let's now see what happens beneath the hood when using those pipelines. As we saw, the model and tokenizer are created
|
||||
using the :obj:`from_pretrained` method:
|
||||
|
||||
::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
>>> model_name = "distilbert-base-uncased-finetuned-sst-2-english"
|
||||
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
||||
>>> model_name = "distilbert-base-uncased-finetuned-sst-2-english"
|
||||
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
|
||||
Using the tokenizer
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
We mentioned the tokenizer is responsible for the preprocessing of your texts. First, it will split a given text into
|
||||
words (or parts of words, punctuation symbols, etc.), usually called `tokens`. There are multiple rules that can govern
|
||||
that process, which is why we need to instantiate the tokenizer using the name of the model, to make sure we use the
|
||||
same rules as when the model was pretrained.
|
||||
|
||||
The second step is to convert those `tokens` into numbers, to be able to build a tensor out of them and feed them to
|
||||
the model. To do this, the tokenizer has a `vocab`, which is the part we download when we instantiate it with the
|
||||
:obj:`from_pretrained` method, since we need to use the same `vocab` as when the model was pretrained.
|
||||
|
||||
To apply these steps on a given text, we can just feed it to our tokenizer:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> inputs = tokenizer("We are very happy to show you the 🤗 Transformers library.")
|
||||
|
||||
This returns a dictionary mapping strings to lists of ints. It contains the `ids of the tokens <glossary.html#input-ids>`__,
|
||||
as mentioned before, but also additional arguments that will be useful to the model. Here for instance, we also have an
|
||||
`attention mask <glossary.html#attention-mask>`__ that the model will use to have a better understanding of the sequence:
|
||||
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> print(inputs)
|
||||
{'input_ids': [101, 2057, 2024, 2200, 3407, 2000, 2265, 2017, 1996, 100, 19081, 3075, 1012, 102], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
|
||||
|
||||
You can pass a list of sentences directly to your tokenizer. If your goal is to send them through your model as a
|
||||
batch, you probably want to pad them all to the same length, truncate them to the maximum length the model can accept
|
||||
and get tensors back. You can specify all of that to the tokenizer:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> pt_batch = tokenizer(
|
||||
... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
|
||||
... padding=True,
|
||||
... truncation=True,
|
||||
... return_tensors="pt"
|
||||
... )
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> tf_batch = tokenizer(
|
||||
... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
|
||||
... padding=True,
|
||||
... truncation=True,
|
||||
... return_tensors="tf"
|
||||
... )
|
||||
|
||||
The padding is automatically applied on the side the model expects it (in this case, on the right), with the
|
||||
padding token the model was pretrained with. The attention mask is also adapted to take the padding into account:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> for key, value in pt_batch.items():
|
||||
... print(f"{key}: {value.numpy().tolist()}")
|
||||
input_ids: [[101, 2057, 2024, 2200, 3407, 2000, 2265, 2017, 1996, 100, 19081, 3075, 1012, 102], [101, 2057, 3246, 2017, 2123, 1005, 1056, 5223, 2009, 1012, 102, 0, 0, 0]]
|
||||
attention_mask: [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> for key, value in tf_batch.items():
|
||||
... print(f"{key}: {value.numpy().tolist()}")
|
||||
input_ids: [[101, 2057, 2024, 2200, 3407, 2000, 2265, 2017, 1996, 100, 19081, 3075, 1012, 102], [101, 2057, 3246, 2017, 2123, 1005, 1056, 5223, 2009, 1012, 102, 0, 0, 0]]
|
||||
attention_mask: [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]
|
||||
|
||||
You can learn more about tokenizers :doc:`here <preprocessing>`.
|
||||
|
||||
Using the model
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Once your input has been preprocessed by the tokenizer, you can directly send it to the model. As we mentioned, it will
|
||||
contain all the relevant information the model needs. If you're using a TensorFlow model, you can pass the
|
||||
dictionary directly to the model; for a PyTorch model, you need to unpack the dictionary by adding :obj:`**`.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> pt_outputs = pt_model(**pt_batch)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> tf_outputs = tf_model(tf_batch)
|
||||
|
||||
In 🤗 Transformers, all outputs are tuples (potentially with only one element). Here, we get a tuple with just the
|
||||
final activations of the model.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> print(pt_outputs)
|
||||
(tensor([[-4.0833, 4.3364],
|
||||
[ 0.0818, -0.0418]], grad_fn=<AddmmBackward>),)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> print(tf_outputs)
|
||||
(<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
|
||||
array([[-4.0832963 , 4.3364134 ],
|
||||
[ 0.08181238, -0.04178794]], dtype=float32)>,)
|
||||
|
||||
.. note::
|
||||
|
||||
All 🤗 Transformers models (PyTorch or TensorFlow) return the activations of the model *before* the final
|
||||
activation function (like SoftMax) since this final activation function is often fused with the loss.
|
||||
|
||||
Let's apply the SoftMax activation to get predictions.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> import torch.nn.functional as F
|
||||
>>> pt_predictions = F.softmax(pt_outputs[0], dim=-1)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> import tensorflow as tf
|
||||
>>> tf_predictions = tf.nn.softmax(tf_outputs[0], axis=-1)
|
||||
|
||||
We can see we get the numbers from before:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> print(tf_predictions)
|
||||
tf.Tensor(
|
||||
[[2.2042994e-04 9.9977952e-01]
|
||||
[5.3086078e-01 4.6913919e-01]], shape=(2, 2), dtype=float32)
|
||||
>>> ## PYTORCH CODE
|
||||
>>> print(pt_predictions)
|
||||
tensor([[2.2043e-04, 9.9978e-01],
|
||||
[5.3086e-01, 4.6914e-01]], grad_fn=<SoftmaxBackward>)
|
||||
|
||||
If you have labels, you can provide them to the model; it will return a tuple with the loss and the final activations.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> import torch
|
||||
>>> pt_outputs = pt_model(**pt_batch, labels=torch.tensor([1, 0]))
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> import tensorflow as tf
|
||||
>>> tf_outputs = tf_model(tf_batch, labels=tf.constant([1, 0]))
|
||||
|
||||
Models are standard `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ or
|
||||
`tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ so you can use them in your usual
|
||||
training loop. 🤗 Transformers also provides a :class:`~transformers.Trainer` (or :class:`~transformers.TFTrainer` if
|
||||
you are using TensorFlow) class to help with your training (taking care of things such as distributed training, mixed
|
||||
precision, etc.). See the :doc:`training tutorial <training>` for more details.
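As a rough sketch (not part of the original example; the optimizer and learning rate below are arbitrary choices), a single manual training step with the PyTorch model and batch from above could look like this:

.. code-block::

    >>> ## PYTORCH CODE
    >>> import torch
    >>> optimizer = torch.optim.AdamW(pt_model.parameters(), lr=5e-5)  # arbitrary optimizer and learning rate
    >>> pt_model.train()  # switch the model to training mode (returns the model itself)
    >>> outputs = pt_model(**pt_batch, labels=torch.tensor([1, 0]))
    >>> loss = outputs[0]  # the loss is the first element of the tuple when labels are provided
    >>> loss.backward()  # compute gradients
    >>> optimizer.step()  # update the weights
    >>> optimizer.zero_grad()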
|
||||
|
||||
Once your model is fine-tuned, you can save it together with its tokenizer in the following way:
|
||||
|
||||
::
|
||||
|
||||
tokenizer.save_pretrained(save_directory)
|
||||
model.save_pretrained(save_directory)
|
||||
|
||||
You can then load this model back using the :func:`~transformers.AutoModel.from_pretrained` method by passing the
|
||||
directory name instead of the model name. One cool feature of 🤗 Transformers is that you can easily switch between
|
||||
PyTorch and TensorFlow: any model saved as before can be loaded back either in PyTorch or TensorFlow. If you are
|
||||
loading a saved PyTorch model into a TensorFlow model, use :func:`~transformers.TFAutoModel.from_pretrained` like this:
|
||||
|
||||
::
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(save_directory)
|
||||
model = TFAutoModel.from_pretrained(save_directory, from_pt=True)
|
||||
|
||||
and if you are loading a saved TensorFlow model into a PyTorch model, you should use the following code:
|
||||
|
||||
::
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(save_directory)
|
||||
model = AutoModel.from_pretrained(save_directory, from_tf=True)
|
||||
|
||||
Lastly, you can also ask the model to return all hidden states and all attention weights if you need them:
|
||||
|
||||
|
||||
::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> pt_outputs = pt_model(**pt_batch, output_hidden_states=True, output_attentions=True)
|
||||
>>> all_hidden_states, all_attentions = pt_outputs[-2:]
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> tf_outputs = tf_model(tf_batch, output_hidden_states=True, output_attentions=True)
|
||||
>>> all_hidden_states, all_attentions = tf_outputs[-2:]
|
||||
|
||||
Accessing the code
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The :obj:`AutoModel` and :obj:`AutoTokenizer` classes are just shortcuts that will automatically work with any
|
||||
pretrained model. Behind the scenes, the library has one model class per combination of architecture plus class, so the
|
||||
code is easy to access and tweak if you need to.
|
||||
|
||||
In our previous example, the model was called "distilbert-base-uncased-finetuned-sst-2-english", which means it's
|
||||
using the :doc:`DistilBERT </model_doc/distilbert>` architecture. The model automatically created is then a
|
||||
:class:`~transformers.DistilBertForSequenceClassification`. You can look at its documentation for all details relevant
|
||||
to that specific model, or browse the source code. This is how you would directly instantiate the model and tokenizer
|
||||
without the auto magic:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
|
||||
>>> model_name = "distilbert-base-uncased-finetuned-sst-2-english"
|
||||
>>> model = DistilBertForSequenceClassification.from_pretrained(model_name)
|
||||
>>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification
|
||||
>>> model_name = "distilbert-base-uncased-finetuned-sst-2-english"
|
||||
>>> model = TFDistilBertForSequenceClassification.from_pretrained(model_name)
|
||||
>>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)
|
||||
|
||||
Customizing the model
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you want to change how the model itself is built, you can define your custom configuration class. Each architecture
|
||||
comes with its own relevant configuration (in the case of DistilBERT, :class:`~transformers.DistilBertConfig`) which
|
||||
allows you to specify options such as the hidden dimension, dropout rate, etc. If you do core modifications, like changing the
|
||||
hidden size, you won't be able to use a pretrained model anymore and will need to train from scratch. You would then
|
||||
instantiate the model directly from this configuration.
|
||||
|
||||
Here we use the predefined vocabulary of DistilBERT (hence load the tokenizer with the
|
||||
:func:`~transformers.DistilBertTokenizer.from_pretrained` method) and initialize the model from scratch (hence
|
||||
instantiate the model from the configuration instead of using the
|
||||
:func:`~transformers.DistilBertForSequenceClassification.from_pretrained` method).
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import DistilBertConfig, DistilBertTokenizer, DistilBertForSequenceClassification
|
||||
>>> config = DistilBertConfig(n_heads=8, dim=512, hidden_dim=4*512)
|
||||
>>> tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
|
||||
>>> model = DistilBertForSequenceClassification(config)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import DistilBertConfig, DistilBertTokenizer, TFDistilBertForSequenceClassification
|
||||
>>> config = DistilBertConfig(n_heads=8, dim=512, hidden_dim=4*512)
|
||||
>>> tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
|
||||
>>> model = TFDistilBertForSequenceClassification(config)
|
||||
|
||||
For something that only changes the head of the model (for instance, the number of labels), you can still use a
|
||||
pretrained model for the body. For instance, let's define a classifier for 10 different labels using a pretrained body.
|
||||
We could create a configuration with all the default values and just change the number of labels. More simply, you
|
||||
can directly pass any argument a configuration would take to the :func:`from_pretrained` method and it will update the
|
||||
default configuration with it:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import DistilBertConfig, DistilBertTokenizer, DistilBertForSequenceClassification
|
||||
>>> model_name = "distilbert-base-uncased"
|
||||
>>> model = DistilBertForSequenceClassification.from_pretrained(model_name, num_labels=10)
|
||||
>>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import DistilBertConfig, DistilBertTokenizer, TFDistilBertForSequenceClassification
|
||||
>>> model_name = "distilbert-base-uncased"
|
||||
>>> model = TFDistilBertForSequenceClassification.from_pretrained(model_name, num_labels=10)
|
||||
>>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)
|
||||
@ -1,188 +0,0 @@
|
||||
Loading Google AI or OpenAI pre-trained weights or PyTorch dump
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
``from_pretrained()`` method
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To load one of Google AI's or OpenAI's pre-trained models, or a PyTorch saved model (an instance of ``BertForPreTraining`` saved with ``torch.save()``\ ), the PyTorch model classes and the tokenizer can be instantiated using the ``from_pretrained()`` method:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
model = BERT_CLASS.from_pretrained(PRE_TRAINED_MODEL_NAME_OR_PATH, cache_dir=None, from_tf=False, state_dict=None, *input, **kwargs)
|
||||
|
||||
where
|
||||
|
||||
|
||||
* ``BERT_CLASS`` is either a tokenizer to load the vocabulary (\ ``BertTokenizer`` or ``OpenAIGPTTokenizer`` classes) or one of the eight BERT or three OpenAI GPT PyTorch model classes (to load the pre-trained weights): ``BertModel``\ , ``BertForMaskedLM``\ , ``BertForNextSentencePrediction``\ , ``BertForPreTraining``\ , ``BertForSequenceClassification``\ , ``BertForTokenClassification``\ , ``BertForMultipleChoice``\ , ``BertForQuestionAnswering``\ , ``OpenAIGPTModel``\ , ``OpenAIGPTLMHeadModel`` or ``OpenAIGPTDoubleHeadsModel``\ , and
|
||||
*
|
||||
``PRE_TRAINED_MODEL_NAME_OR_PATH`` is either:
|
||||
|
||||
|
||||
*
|
||||
the shortcut name of one of Google AI's or OpenAI's pre-trained models selected in the list:
|
||||
|
||||
|
||||
* ``bert-base-uncased``: 12-layer, 768-hidden, 12-heads, 110M parameters
|
||||
* ``bert-large-uncased``: 24-layer, 1024-hidden, 16-heads, 340M parameters
|
||||
* ``bert-base-cased``: 12-layer, 768-hidden, 12-heads, 110M parameters
|
||||
* ``bert-large-cased``: 24-layer, 1024-hidden, 16-heads, 340M parameters
|
||||
* ``bert-base-multilingual-uncased``: (Orig, not recommended) 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
|
||||
* ``bert-base-multilingual-cased``: **(New, recommended)** 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
|
||||
* ``bert-base-chinese``: Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters
|
||||
* ``bert-base-german-cased``: Trained on German data only, 12-layer, 768-hidden, 12-heads, 110M parameters `Performance Evaluation <https://deepset.ai/german-bert>`__
|
||||
* ``bert-large-uncased-whole-word-masking``: 24-layer, 1024-hidden, 16-heads, 340M parameters - Trained with Whole Word Masking (mask all of the tokens corresponding to a word at once)
|
||||
* ``bert-large-cased-whole-word-masking``: 24-layer, 1024-hidden, 16-heads, 340M parameters - Trained with Whole Word Masking (mask all of the tokens corresponding to a word at once)
|
||||
* ``bert-large-uncased-whole-word-masking-finetuned-squad``: The ``bert-large-uncased-whole-word-masking`` model finetuned on SQuAD (using the ``run_bert_squad.py`` examples). Results: *exact_match: 86.91579943235573, f1: 93.1532499015869*
|
||||
* ``openai-gpt``: OpenAI GPT English model, 12-layer, 768-hidden, 12-heads, 110M parameters
|
||||
* ``gpt2``: OpenAI GPT-2 English model, 12-layer, 768-hidden, 12-heads, 117M parameters
|
||||
* ``gpt2-medium``: OpenAI GPT-2 English model, 24-layer, 1024-hidden, 16-heads, 345M parameters
|
||||
* ``transfo-xl-wt103``: Transformer-XL English model trained on wikitext-103, 18-layer, 1024-hidden, 16-heads, 257M parameters
|
||||
|
||||
*
|
||||
a path or url to a pretrained model archive containing:
|
||||
|
||||
|
||||
* ``bert_config.json`` or ``openai_gpt_config.json`` a configuration file for the model, and
|
||||
* ``pytorch_model.bin`` a PyTorch dump of a pre-trained instance of ``BertForPreTraining``\ , ``OpenAIGPTModel``\ , ``TransfoXLModel``\ , ``GPT2LMHeadModel`` (saved with the usual ``torch.save()``\ )
|
||||
|
||||
If ``PRE_TRAINED_MODEL_NAME_OR_PATH`` is a shortcut name, the pre-trained weights will be downloaded from AWS S3 (see the links `here <https://github.com/huggingface/transformers/blob/master/transformers/modeling_bert.py>`__\ ) and stored in a cache folder to avoid future download (the cache folder can be found at ``~/.pytorch_pretrained_bert/``\ ).
|
||||
|
||||
*
|
||||
``cache_dir`` can be an optional path to a specific directory to download and cache the pre-trained model weights. This option is useful in particular when you are using distributed training: to avoid concurrent access to the same weights you can set for example ``cache_dir='./pretrained_model_{}'.format(args.local_rank)`` (see the section on distributed training for more information).
|
||||
|
||||
* ``from_tf``\ : should we load the weights from a locally saved TensorFlow checkpoint
|
||||
* ``state_dict``\ : an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
|
||||
* ``*inputs``\ , ``**kwargs``: additional inputs for the specific BERT class (e.g. ``num_labels`` for ``BertForSequenceClassification``); see the short example below
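For illustration, here is a minimal sketch of these last two options (the weights file name below is a placeholder, and ``num_labels`` is simply forwarded to ``BertForSequenceClassification``):

.. code-block:: python

    import torch
    from transformers import BertForSequenceClassification

    # Use your own fine-tuned weights (placeholder file name) instead of the
    # downloaded pre-trained ones, and forward num_labels to the model class.
    state_dict = torch.load("my_finetuned_weights.bin", map_location="cpu")
    model = BertForSequenceClassification.from_pretrained(
        "bert-base-uncased",
        num_labels=2,
        state_dict=state_dict,
    )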
|
||||
|
||||
``Uncased`` means that the text has been lowercased before WordPiece tokenization, e.g., ``John Smith`` becomes ``john smith``. The Uncased model also strips out any accent markers. ``Cased`` means that the true case and accent markers are preserved. Typically, the Uncased model is better unless you know that case information is important for your task (e.g., Named Entity Recognition or Part-of-Speech tagging). For information about the Multilingual and Chinese model, see the `Multilingual README <https://github.com/google-research/bert/blob/master/multilingual.md>`__ or the original TensorFlow repository.
|
||||
|
||||
When using an ``uncased model``\ , make sure to pass ``--do_lower_case`` to the example training scripts (or pass ``do_lower_case=True`` to ``FullTokenizer`` if you're using your own script and loading the tokenizer yourself).
|
||||
|
||||
Examples:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# BERT
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, do_basic_tokenize=True)
|
||||
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
|
||||
|
||||
# OpenAI GPT
|
||||
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
|
||||
model = OpenAIGPTModel.from_pretrained('openai-gpt')
|
||||
|
||||
# Transformer-XL
|
||||
tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
|
||||
model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
|
||||
|
||||
# OpenAI GPT-2
|
||||
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
|
||||
model = GPT2Model.from_pretrained('gpt2')
|
||||
|
||||
Cache directory
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
``pytorch_pretrained_bert`` saves the pretrained weights in a cache directory which is located at (in this order of priority):
|
||||
|
||||
|
||||
* the optional ``cache_dir`` argument to the ``from_pretrained()`` method (see above),
|
||||
* shell environment variable ``PYTORCH_PRETRAINED_BERT_CACHE``\ ,
|
||||
* PyTorch cache home + ``/pytorch_pretrained_bert/``
|
||||
where PyTorch cache home is defined by (in this order):
|
||||
|
||||
* shell environment variable ``TORCH_HOME``
* shell environment variable ``XDG_CACHE_HOME`` + ``/torch/``
|
||||
* default: ``~/.cache/torch/``
|
||||
|
||||
Usually, if you don't set any specific environment variable, ``pytorch_pretrained_bert`` cache will be at ``~/.cache/torch/pytorch_pretrained_bert/``.
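For example, here is a minimal sketch of both options (the directory paths are placeholders):

.. code-block:: python

    import os

    # Option 1: point the cache to a custom location via the environment variable
    # (set it before the library is imported so that it is picked up).
    os.environ["PYTORCH_PRETRAINED_BERT_CACHE"] = "/data/bert_cache"

    from transformers import BertModel

    # Option 2: pass an explicit cache directory for a single call.
    model = BertModel.from_pretrained("bert-base-uncased", cache_dir="/data/bert_cache")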
|
||||
|
||||
You can always safely delete the ``pytorch_pretrained_bert`` cache, but the pretrained model weights and vocabulary files will then have to be re-downloaded from our S3.
|
||||
|
||||
Serialization best-practices
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This section explains how you can save and re-load a fine-tuned model (BERT, GPT, GPT-2 and Transformer-XL).
|
||||
There are three types of files you need to save to be able to reload a fine-tuned model:
|
||||
|
||||
|
||||
* the model itself, which should be saved following PyTorch serialization `best practices <https://pytorch.org/docs/stable/notes/serialization.html#best-practices>`__\ ,
|
||||
* the configuration file of the model which is saved as a JSON file, and
|
||||
* the vocabulary (and the merges for the BPE-based models GPT and GPT-2).
|
||||
|
||||
The *default filenames* of these files are as follows:
|
||||
|
||||
|
||||
* the model weights file: ``pytorch_model.bin``\ ,
|
||||
* the configuration file: ``config.json``\ ,
|
||||
* the vocabulary file: ``vocab.txt`` for BERT and Transformer-XL, ``vocab.json`` for GPT/GPT-2 (BPE vocabulary),
|
||||
* for GPT/GPT-2 (BPE vocabulary) the additional merges file: ``merges.txt``.
|
||||
|
||||
**If you save a model using these *default filenames*\ , you can then re-load the model and tokenizer using the ``from_pretrained()`` method.**
|
||||
|
||||
Here is the recommended way of saving the model, configuration and vocabulary to an ``output_dir`` directory and reloading the model and tokenizer afterwards:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import WEIGHTS_NAME, CONFIG_NAME
|
||||
|
||||
output_dir = "./models/"
|
||||
|
||||
# Step 1: Save a model, configuration and vocabulary that you have fine-tuned
|
||||
|
||||
# If we have a distributed model, save only the encapsulated model
|
||||
# (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
|
||||
model_to_save = model.module if hasattr(model, 'module') else model
|
||||
|
||||
# If we save using the predefined names, we can load using `from_pretrained`
|
||||
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
|
||||
output_config_file = os.path.join(output_dir, CONFIG_NAME)
|
||||
|
||||
torch.save(model_to_save.state_dict(), output_model_file)
|
||||
model_to_save.config.to_json_file(output_config_file)
|
||||
tokenizer.save_vocabulary(output_dir)
|
||||
|
||||
# Step 2: Re-load the saved model and vocabulary
|
||||
|
||||
# Example for a Bert model
|
||||
model = BertForQuestionAnswering.from_pretrained(output_dir)
|
||||
tokenizer = BertTokenizer.from_pretrained(output_dir, do_lower_case=args.do_lower_case) # Add specific options if needed
|
||||
# Example for a GPT model
|
||||
model = OpenAIGPTDoubleHeadsModel.from_pretrained(output_dir)
|
||||
tokenizer = OpenAIGPTTokenizer.from_pretrained(output_dir)
|
||||
|
||||
Here is another way you can save and reload the model if you want to use specific paths for each type of files:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
output_model_file = "./models/my_own_model_file.bin"
|
||||
output_config_file = "./models/my_own_config_file.bin"
|
||||
output_vocab_file = "./models/my_own_vocab_file.bin"
|
||||
|
||||
# Step 1: Save a model, configuration and vocabulary that you have fine-tuned
|
||||
|
||||
# If we have a distributed model, save only the encapsulated model
|
||||
# (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
|
||||
model_to_save = model.module if hasattr(model, 'module') else model
|
||||
|
||||
torch.save(model_to_save.state_dict(), output_model_file)
|
||||
model_to_save.config.to_json_file(output_config_file)
|
||||
tokenizer.save_vocabulary(output_vocab_file)
|
||||
|
||||
# Step 2: Re-load the saved model and vocabulary
|
||||
|
||||
# We didn't save using the predefined WEIGHTS_NAME, CONFIG_NAME names, we cannot load using `from_pretrained`.
|
||||
# Here is how to do it in this situation:
|
||||
|
||||
# Example for a Bert model
|
||||
config = BertConfig.from_json_file(output_config_file)
|
||||
model = BertForQuestionAnswering(config)
|
||||
state_dict = torch.load(output_model_file)
|
||||
model.load_state_dict(state_dict)
|
||||
tokenizer = BertTokenizer(output_vocab_file, do_lower_case=args.do_lower_case)
|
||||
|
||||
# Example for a GPT model
|
||||
config = OpenAIGPTConfig.from_json_file(output_config_file)
|
||||
model = OpenAIGPTDoubleHeadsModel(config)
|
||||
state_dict = torch.load(output_model_file)
|
||||
model.load_state_dict(state_dict)
|
||||
tokenizer = OpenAIGPTTokenizer(output_vocab_file)
|
||||
|
||||
845  docs/source/task_summary.rst  Normal file
@@ -0,0 +1,845 @@
|
||||
Summary of the tasks
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This page shows the most frequent use-cases when using the library. The models available allow for many different
|
||||
configurations and a great versatility in use-cases. The simplest ones are presented here, showcasing usage
|
||||
for tasks such as question answering, sequence classification, named entity recognition and others.
|
||||
|
||||
These examples leverage auto-models, which are classes that will instantiate a model according to a given checkpoint,
|
||||
automatically selecting the correct model architecture. Please check the :class:`~transformers.AutoModel` documentation
|
||||
for more information.
|
||||
Feel free to modify the code to be more specific and adapt it to your specific use-case.
|
||||
|
||||
In order for a model to perform well on a task, it must be loaded from a checkpoint corresponding to that task. These
|
||||
checkpoints are usually pre-trained on a large corpus of data and fine-tuned on a specific task. This means the
|
||||
following:
|
||||
|
||||
- Not all models were fine-tuned on all tasks. If you want to fine-tune a model on a specific task, you can leverage
|
||||
one of the `run_$TASK.py` scripts in the
|
||||
`examples <https://github.com/huggingface/transformers/tree/master/examples>`_ directory.
|
||||
- Fine-tuned models were fine-tuned on a specific dataset. This dataset may or may not overlap with your use-case
|
||||
and domain. As mentioned previously, you may leverage the
|
||||
`examples <https://github.com/huggingface/transformers/tree/master/examples>`_ scripts to fine-tune your model, or you
|
||||
may create your own training script.
|
||||
|
||||
In order to do an inference on a task, several mechanisms are made available by the library:
|
||||
|
||||
- Pipelines: very easy-to-use abstractions, which require as little as two lines of code.
|
||||
- Using a model directly with a tokenizer (PyTorch/TensorFlow): the full inference using the model. Less abstraction,
|
||||
but much more powerful.
|
||||
|
||||
Both approaches are showcased here.
|
||||
|
||||
.. note::
|
||||
|
||||
All tasks presented here leverage pre-trained checkpoints that were fine-tuned on specific tasks. Loading a
|
||||
checkpoint that was not fine-tuned on a specific task would load only the base transformer layers and not the
|
||||
additional head that is used for the task, initializing the weights of that head randomly.
|
||||
|
||||
This would produce random output.
|
||||
|
||||
Sequence Classification
|
||||
--------------------------
|
||||
|
||||
Sequence classification is the task of classifying sequences according to a given number of classes. An example
|
||||
of sequence classification is the GLUE dataset, which is entirely based on that task. If you would like to fine-tune
|
||||
a model on a GLUE sequence classification task, you may leverage the
|
||||
`run_glue.py <https://github.com/huggingface/transformers/tree/master/examples/text-classification/run_glue.py>`_ or
|
||||
`run_tf_glue.py <https://github.com/huggingface/transformers/tree/master/examples/text-classification/run_tf_glue.py>`_ scripts.
|
||||
|
||||
Here is an example of using the pipelines to do sentiment analysis: identifying if a sequence is positive or negative.
|
||||
It leverages a model fine-tuned on SST-2, which is a GLUE task.
|
||||
|
||||
This returns a label ("POSITIVE" or "NEGATIVE") alongside a score, as follows:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> nlp = pipeline("sentiment-analysis")
|
||||
|
||||
>>> result = nlp("I hate you")[0]
|
||||
>>> print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
|
||||
label: NEGATIVE, with score: 0.9991
|
||||
|
||||
>>> result = nlp("I love you")[0]
|
||||
>>> print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
|
||||
label: POSITIVE, with score: 0.9999
|
||||
|
||||
|
||||
Here is an example of doing a sequence classification using a model to determine if two sequences are paraphrases
|
||||
of each other. The process is the following:
|
||||
|
||||
- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and is loaded
with the weights stored in the checkpoint.
|
||||
- Build a sequence from the two sentences, with the correct model-specific separators, token type ids
|
||||
and attention masks (:func:`~transformers.PreTrainedTokenizer.encode` and
|
||||
:func:`~transformers.PreTrainedTokenizer.__call__` take care of this)
|
||||
- Pass this sequence through the model so that it is classified in one of the two available classes: 0
|
||||
(not a paraphrase) and 1 (is a paraphrase)
|
||||
- Compute the softmax of the result to get probabilities over the classes
|
||||
- Print the results
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
>>> import torch
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc")
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc")
|
||||
|
||||
>>> classes = ["not paraphrase", "is paraphrase"]
|
||||
|
||||
>>> sequence_0 = "The company HuggingFace is based in New York City"
|
||||
>>> sequence_1 = "Apples are especially bad for your health"
|
||||
>>> sequence_2 = "HuggingFace's headquarters are situated in Manhattan"
|
||||
|
||||
>>> paraphrase = tokenizer(sequence_0, sequence_2, return_tensors="pt")
|
||||
>>> not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors="pt")
|
||||
|
||||
>>> paraphrase_classification_logits = model(**paraphrase)[0]
|
||||
>>> not_paraphrase_classification_logits = model(**not_paraphrase)[0]
|
||||
|
||||
>>> paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0]
|
||||
>>> not_paraphrase_results = torch.softmax(not_paraphrase_classification_logits, dim=1).tolist()[0]
|
||||
|
||||
>>> # Should be paraphrase
|
||||
>>> for i in range(len(classes)):
|
||||
... print(f"{classes[i]}: {int(round(paraphrase_results[i] * 100))}%")
|
||||
not paraphrase: 10%
|
||||
is paraphrase: 90%
|
||||
|
||||
>>> # Should not be paraphrase
|
||||
>>> for i in range(len(classes)):
|
||||
... print(f"{classes[i]}: {int(round(not_paraphrase_results[i] * 100))}%")
|
||||
not paraphrase: 94%
|
||||
is paraphrase: 6%
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc")
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc")
|
||||
|
||||
>>> classes = ["not paraphrase", "is paraphrase"]
|
||||
|
||||
>>> sequence_0 = "The company HuggingFace is based in New York City"
|
||||
>>> sequence_1 = "Apples are especially bad for your health"
|
||||
>>> sequence_2 = "HuggingFace's headquarters are situated in Manhattan"
|
||||
|
||||
>>> paraphrase = tokenizer(sequence_0, sequence_2, return_tensors="tf")
|
||||
>>> not_paraphrase = tokenizer(sequence_0, sequence_1, return_tensors="tf")
|
||||
|
||||
>>> paraphrase_classification_logits = model(paraphrase)[0]
|
||||
>>> not_paraphrase_classification_logits = model(not_paraphrase)[0]
|
||||
|
||||
>>> paraphrase_results = tf.nn.softmax(paraphrase_classification_logits, axis=1).numpy()[0]
|
||||
>>> not_paraphrase_results = tf.nn.softmax(not_paraphrase_classification_logits, axis=1).numpy()[0]
|
||||
|
||||
>>> # Should be paraphrase
|
||||
>>> for i in range(len(classes)):
|
||||
... print(f"{classes[i]}: {int(round(paraphrase_results[i] * 100))}%")
|
||||
not paraphrase: 10%
|
||||
is paraphrase: 90%
|
||||
|
||||
>>> # Should not be paraphrase
|
||||
>>> for i in range(len(classes)):
|
||||
... print(f"{classes[i]}: {int(round(not_paraphrase_results[i] * 100))}%")
|
||||
not paraphrase: 94%
|
||||
is paraphrase: 6%
|
||||
|
||||
Extractive Question Answering
|
||||
----------------------------------------------------
|
||||
|
||||
Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
|
||||
question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
|
||||
a model on a SQuAD task, you may leverage the `run_squad.py` script.
|
||||
|
||||
Here is an example of using the pipelines to do question answering: extracting an answer from a text given a question.
|
||||
It leverages a fine-tuned model on SQuAD.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> nlp = pipeline("question-answering")
|
||||
|
||||
>>> context = r"""
|
||||
... Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
|
||||
... question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
|
||||
... a model on a SQuAD task, you may leverage the examples/question-answering/run_squad.py script.
|
||||
... """
|
||||
|
||||
This returns an answer extracted from the text, a confidence score, alongside "start" and "end" values which
|
||||
are the positions of the extracted answer in the text.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> result = nlp(question="What is extractive question answering?", context=context)
|
||||
>>> print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
|
||||
Answer: 'the task of extracting an answer from a text given a question.', score: 0.6226, start: 34, end: 96
|
||||
|
||||
>>> result = nlp(question="What is a good example of a question answering dataset?", context=context)
|
||||
>>> print(f"Answer: '{result['answer']}', score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}")
|
||||
Answer: 'SQuAD dataset,', score: 0.5053, start: 147, end: 161
|
||||
|
||||
|
||||
Here is an example of question answering using a model and a tokenizer. The process is the following:
|
||||
|
||||
- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and is loaded
with the weights stored in the checkpoint.
|
||||
- Define a text and a few questions.
|
||||
- Iterate over the questions and build a sequence from the text and the current question, with the correct
|
||||
model-specific separators token type ids and attention masks
|
||||
- Pass this sequence through the model. This outputs a range of scores across the entire sequence tokens (question and
|
||||
text), for both the start and end positions.
|
||||
- Compute the softmax of the result to get probabilities over the tokens
|
||||
- Fetch the tokens from the identified start and stop values, convert those tokens to a string.
|
||||
- Print the results
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoTokenizer, AutoModelForQuestionAnswering
|
||||
>>> import torch
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
|
||||
>>> model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
|
||||
|
||||
>>> text = r"""
|
||||
... 🤗 Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose
|
||||
... architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet…) for Natural Language Understanding (NLU) and Natural
|
||||
... Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between
|
||||
... TensorFlow 2.0 and PyTorch.
|
||||
... """
|
||||
|
||||
>>> questions = [
|
||||
... "How many pretrained models are available in 🤗 Transformers?",
|
||||
... "What does 🤗 Transformers provide?",
|
||||
... "🤗 Transformers provides interoperability between which frameworks?",
|
||||
... ]
|
||||
|
||||
>>> for question in questions:
|
||||
... inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="pt")
|
||||
... input_ids = inputs["input_ids"].tolist()[0]
|
||||
...
|
||||
... text_tokens = tokenizer.convert_ids_to_tokens(input_ids)
|
||||
... answer_start_scores, answer_end_scores = model(**inputs)
|
||||
...
|
||||
... answer_start = torch.argmax(
|
||||
... answer_start_scores
|
||||
... ) # Get the most likely beginning of answer with the argmax of the score
|
||||
... answer_end = torch.argmax(answer_end_scores) + 1 # Get the most likely end of answer with the argmax of the score
|
||||
...
|
||||
... answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
|
||||
...
|
||||
... print(f"Question: {question}")
|
||||
... print(f"Answer: {answer}")
|
||||
Question: How many pretrained models are available in 🤗 Transformers?
|
||||
Answer: over 32 +
|
||||
Question: What does 🤗 Transformers provide?
|
||||
Answer: general - purpose architectures
|
||||
Question: 🤗 Transformers provides interoperability between which frameworks?
|
||||
Answer: tensorflow 2 . 0 and pytorch
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
|
||||
>>> model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
|
||||
|
||||
>>> text = r"""
|
||||
... 🤗 Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose
|
||||
... architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet…) for Natural Language Understanding (NLU) and Natural
|
||||
... Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between
|
||||
... TensorFlow 2.0 and PyTorch.
|
||||
... """
|
||||
|
||||
>>> questions = [
|
||||
... "How many pretrained models are available in 🤗 Transformers?",
|
||||
... "What does 🤗 Transformers provide?",
|
||||
... "🤗 Transformers provides interoperability between which frameworks?",
|
||||
... ]
|
||||
|
||||
>>> for question in questions:
|
||||
... inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="tf")
|
||||
... input_ids = inputs["input_ids"].numpy()[0]
|
||||
...
|
||||
... text_tokens = tokenizer.convert_ids_to_tokens(input_ids)
|
||||
... answer_start_scores, answer_end_scores = model(inputs)
|
||||
...
|
||||
... answer_start = tf.argmax(
|
||||
... answer_start_scores, axis=1
|
||||
... ).numpy()[0] # Get the most likely beginning of answer with the argmax of the score
|
||||
... answer_end = (
|
||||
... tf.argmax(answer_end_scores, axis=1) + 1
|
||||
... ).numpy()[0] # Get the most likely end of answer with the argmax of the score
|
||||
... answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
|
||||
...
|
||||
... print(f"Question: {question}")
|
||||
... print(f"Answer: {answer}")
|
||||
Question: How many pretrained models are available in 🤗 Transformers?
|
||||
Answer: over 32 +
|
||||
Question: What does 🤗 Transformers provide?
|
||||
Answer: general - purpose architectures
|
||||
Question: 🤗 Transformers provides interoperability between which frameworks?
|
||||
Answer: tensorflow 2 . 0 and pytorch
|
||||
|
||||
|
||||
|
||||
Language Modeling
|
||||
----------------------------------------------------
|
||||
|
||||
Language modeling is the task of fitting a model to a corpus, which can be domain specific. All popular transformer
|
||||
based models are trained using a variant of language modeling, e.g. BERT with masked language modeling, GPT-2 with
|
||||
causal language modeling.
|
||||
|
||||
Language modeling can be useful outside of pre-training as well, for example to shift the model distribution to be
|
||||
domain-specific: using a language model trained over a very large corpus, and then fine-tuning it to a news dataset
|
||||
or on scientific papers e.g. `LysandreJik/arxiv-nlp <https://huggingface.co/lysandre/arxiv-nlp>`__.
|
||||
|
||||
Masked Language Modeling
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Masked language modeling is the task of masking tokens in a sequence with a masking token, and prompting the model to
|
||||
fill that mask with an appropriate token. This allows the model to attend to both the right context (tokens on the
|
||||
right of the mask) and the left context (tokens on the left of the mask). Such a training creates a strong basis
|
||||
for downstream tasks requiring bi-directional context such as SQuAD (question answering,
|
||||
see `Lewis, Liu, Goyal et al. <https://arxiv.org/abs/1910.13461>`__, part 4.2).
|
||||
|
||||
Here is an example of using pipelines to replace a mask from a sequence:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> nlp = pipeline("fill-mask")
|
||||
|
||||
This outputs the sequences with the mask filled, the confidence score as well as the token id in the tokenizer
|
||||
vocabulary:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from pprint import pprint
|
||||
>>> pprint(nlp(f"HuggingFace is creating a {nlp.tokenizer.mask_token} that the community uses to solve NLP tasks."))
|
||||
[{'score': 0.1792745739221573,
|
||||
'sequence': '<s>HuggingFace is creating a tool that the community uses to '
|
||||
'solve NLP tasks.</s>',
|
||||
'token': 3944,
|
||||
'token_str': 'Ġtool'},
|
||||
{'score': 0.11349421739578247,
|
||||
'sequence': '<s>HuggingFace is creating a framework that the community uses '
|
||||
'to solve NLP tasks.</s>',
|
||||
'token': 7208,
|
||||
'token_str': 'Ġframework'},
|
||||
{'score': 0.05243554711341858,
|
||||
'sequence': '<s>HuggingFace is creating a library that the community uses to '
|
||||
'solve NLP tasks.</s>',
|
||||
'token': 5560,
|
||||
'token_str': 'Ġlibrary'},
|
||||
{'score': 0.03493533283472061,
|
||||
'sequence': '<s>HuggingFace is creating a database that the community uses '
|
||||
'to solve NLP tasks.</s>',
|
||||
'token': 8503,
|
||||
'token_str': 'Ġdatabase'},
|
||||
{'score': 0.02860250137746334,
|
||||
'sequence': '<s>HuggingFace is creating a prototype that the community uses '
|
||||
'to solve NLP tasks.</s>',
|
||||
'token': 17715,
|
||||
'token_str': 'Ġprototype'}]
|
||||
|
||||
Here is an example doing masked language modeling using a model and a tokenizer. The process is the following:
|
||||
|
||||
- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a DistilBERT model and is
loaded with the weights stored in the checkpoint.
|
||||
- Define a sequence with a masked token, placing the :obj:`tokenizer.mask_token` instead of a word.
|
||||
- Encode that sequence into IDs and find the position of the masked token in that list of IDs.
|
||||
- Retrieve the predictions at the index of the mask token: this tensor has the same size as the vocabulary, and the
|
||||
values are the scores attributed to each token. The model gives a higher score to tokens it deems probable in that
|
||||
context.
|
||||
- Retrieve the top 5 tokens using the PyTorch :obj:`topk` or TensorFlow :obj:`top_k` methods.
|
||||
- Replace the mask token with each of the top 5 tokens and print the results
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoModelWithLMHead, AutoTokenizer
|
||||
>>> import torch
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
|
||||
>>> model = AutoModelWithLMHead.from_pretrained("distilbert-base-cased")
|
||||
|
||||
>>> sequence = f"Distilled models are smaller than the models they mimic. Using them instead of the large versions would help {tokenizer.mask_token} our carbon footprint."
|
||||
|
||||
>>> input = tokenizer.encode(sequence, return_tensors="pt")
|
||||
>>> mask_token_index = torch.where(input == tokenizer.mask_token_id)[1]
|
||||
|
||||
>>> token_logits = model(input)[0]
|
||||
>>> mask_token_logits = token_logits[0, mask_token_index, :]
|
||||
|
||||
>>> top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist()
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TFAutoModelWithLMHead, AutoTokenizer
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
|
||||
>>> model = TFAutoModelWithLMHead.from_pretrained("distilbert-base-cased")
|
||||
|
||||
>>> sequence = f"Distilled models are smaller than the models they mimic. Using them instead of the large versions would help {tokenizer.mask_token} our carbon footprint."
|
||||
|
||||
>>> input = tokenizer.encode(sequence, return_tensors="tf")
|
||||
>>> mask_token_index = tf.where(input == tokenizer.mask_token_id)[0, 1]
|
||||
|
||||
>>> token_logits = model(input)[0]
|
||||
>>> mask_token_logits = token_logits[0, mask_token_index, :]
|
||||
|
||||
>>> top_5_tokens = tf.math.top_k(mask_token_logits, 5).indices.numpy()
|
||||
|
||||
|
||||
This prints five sequences, with the top 5 tokens predicted by the model:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> for token in top_5_tokens:
|
||||
... print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token])))
|
||||
Distilled models are smaller than the models they mimic. Using them instead of the large versions would help reduce our carbon footprint.
|
||||
Distilled models are smaller than the models they mimic. Using them instead of the large versions would help increase our carbon footprint.
|
||||
Distilled models are smaller than the models they mimic. Using them instead of the large versions would help decrease our carbon footprint.
|
||||
Distilled models are smaller than the models they mimic. Using them instead of the large versions would help offset our carbon footprint.
|
||||
Distilled models are smaller than the models they mimic. Using them instead of the large versions would help improve our carbon footprint.
|
||||
|
||||
|
||||
Causal Language Modeling
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Causal language modeling is the task of predicting the token following a sequence of tokens. In this situation, the
|
||||
model only attends to the left context (tokens on the left of the position being predicted). Such training is particularly interesting
|
||||
for generation tasks.
|
||||
|
||||
Usually, the next token is predicted by sampling from the logits of the last hidden state the model produces from the input sequence.
|
||||
|
||||
Here is an example using the tokenizer and model and leveraging the :func:`~transformers.PreTrainedModel.top_k_top_p_filtering` method to sample the next token following an input sequence of tokens.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoModelWithLMHead, AutoTokenizer, top_k_top_p_filtering
|
||||
>>> import torch
|
||||
>>> from torch.nn import functional as F
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
|
||||
>>> model = AutoModelWithLMHead.from_pretrained("gpt2")
|
||||
|
||||
>>> sequence = f"Hugging Face is based in DUMBO, New York City, and "
|
||||
|
||||
>>> input_ids = tokenizer.encode(sequence, return_tensors="pt")
|
||||
|
||||
>>> # get logits of last hidden state
|
||||
>>> next_token_logits = model(input_ids)[0][:, -1, :]
|
||||
|
||||
>>> # filter
|
||||
>>> filtered_next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=50, top_p=1.0)
|
||||
|
||||
>>> # sample
|
||||
>>> probs = F.softmax(filtered_next_token_logits, dim=-1)
|
||||
>>> next_token = torch.multinomial(probs, num_samples=1)
|
||||
|
||||
>>> generated = torch.cat([input_ids, next_token], dim=-1)
|
||||
|
||||
>>> resulting_string = tokenizer.decode(generated.tolist()[0])
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TFAutoModelWithLMHead, AutoTokenizer, tf_top_k_top_p_filtering
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
|
||||
>>> model = TFAutoModelWithLMHead.from_pretrained("gpt2")
|
||||
|
||||
>>> sequence = f"Hugging Face is based in DUMBO, New York City, and "
|
||||
|
||||
>>> input_ids = tokenizer.encode(sequence, return_tensors="tf")
|
||||
|
||||
>>> # get logits of last hidden state
|
||||
>>> next_token_logits = model(input_ids)[0][:, -1, :]
|
||||
|
||||
>>> # filter
|
||||
>>> filtered_next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=50, top_p=1.0)
|
||||
|
||||
>>> # sample
|
||||
>>> next_token = tf.random.categorical(filtered_next_token_logits, dtype=tf.int32, num_samples=1)
|
||||
|
||||
>>> generated = tf.concat([input_ids, next_token], axis=1)
|
||||
|
||||
>>> resulting_string = tokenizer.decode(generated.numpy().tolist()[0])
|
||||
|
||||
|
||||
This outputs a (hopefully) coherent next token following the original sequence, which in our case is the word *has*:
|
||||
|
||||
.. code-block::
|
||||
|
||||
print(resulting_string)
|
||||
Hugging Face is based in DUMBO, New York City, and has
|
||||
|
||||
In the next section, we show how this functionality is leveraged in :func:`~transformers.PreTrainedModel.generate` to generate multiple tokens up to a user-defined length.
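As a quick preview, here is a minimal sketch (PyTorch, reusing the GPT-2 ``model``, ``tokenizer`` and ``input_ids`` from the snippet above; the sampling parameters are illustrative) of letting ``generate()`` run this sampling loop for several steps:

.. code-block::

    # Let generate() handle the token-by-token sampling loop for us.
    generated_ids = model.generate(
        input_ids,
        max_length=30,  # total length, prompt included (illustrative value)
        do_sample=True,
        top_k=50,
        top_p=0.95,
    )
    print(tokenizer.decode(generated_ids[0].tolist(), skip_special_tokens=True))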
|
||||
|
||||
Text Generation
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
In text generation (*a.k.a.* *open-ended text generation*) the goal is to create a coherent portion of text that is a continuation of the given context. The following example shows how *GPT-2* can be used in pipelines to generate text. By default, all models apply *Top-K* sampling when used in pipelines, as configured in their respective configurations (see the `gpt-2 config <https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json>`_ for example).
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> text_generator = pipeline("text-generation")
|
||||
>>> print(text_generator("As far as I am concerned, I will", max_length=50, do_sample=False))
|
||||
[{'generated_text': 'As far as I am concerned, I will be the first to admit that I am not a fan of the idea of a "free market." I think that the idea of a free market is a bit of a stretch. I think that the idea'}]
|
||||
|
||||
|
||||
|
||||
Here the model generates a random text with a total maximal length of *50* tokens from the context *"As far as I am concerned, I will"*.
The default arguments of ``PreTrainedModel.generate()`` can be overridden directly in the pipeline, as shown above for the argument ``max_length``.
|
||||
|
||||
Here is an example of text generation using XLNet and its tokenizer.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoModelWithLMHead, AutoTokenizer
|
||||
|
||||
>>> model = AutoModelWithLMHead.from_pretrained("xlnet-base-cased")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
|
||||
|
||||
>>> # Padding text helps XLNet with short prompts - proposed by Aman Rusia in https://github.com/rusiaaman/XLNet-gen#methodology
|
||||
>>> PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
|
||||
... (except for Alexei and Maria) are discovered.
|
||||
... The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
|
||||
... remainder of the story. 1883 Western Siberia,
|
||||
... a young Grigori Rasputin is asked by his father and a group of men to perform magic.
|
||||
... Rasputin has a vision and denounces one of the men as a horse thief. Although his
|
||||
... father initially slaps him for making such an accusation, Rasputin watches as the
|
||||
... man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
|
||||
... the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
|
||||
... with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
|
||||
|
||||
>>> prompt = "Today the weather is really nice and I am planning on "
|
||||
>>> inputs = tokenizer.encode(PADDING_TEXT + prompt, add_special_tokens=False, return_tensors="pt")
|
||||
|
||||
>>> prompt_length = len(tokenizer.decode(inputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
|
||||
>>> outputs = model.generate(inputs, max_length=250, do_sample=True, top_p=0.95, top_k=60)
|
||||
>>> generated = prompt + tokenizer.decode(outputs[0])[prompt_length:]
|
||||
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TFAutoModelWithLMHead, AutoTokenizer
|
||||
|
||||
>>> model = TFAutoModelWithLMHead.from_pretrained("xlnet-base-cased")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
|
||||
|
||||
>>> # Padding text helps XLNet with short prompts - proposed by Aman Rusia in https://github.com/rusiaaman/XLNet-gen#methodology
|
||||
>>> PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
|
||||
... (except for Alexei and Maria) are discovered.
|
||||
... The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
|
||||
... remainder of the story. 1883 Western Siberia,
|
||||
... a young Grigori Rasputin is asked by his father and a group of men to perform magic.
|
||||
... Rasputin has a vision and denounces one of the men as a horse thief. Although his
|
||||
... father initially slaps him for making such an accusation, Rasputin watches as the
|
||||
... man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
|
||||
... the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
|
||||
... with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
|
||||
|
||||
>>> prompt = "Today the weather is really nice and I am planning on "
|
||||
>>> inputs = tokenizer.encode(PADDING_TEXT + prompt, add_special_tokens=False, return_tensors="tf")
|
||||
|
||||
>>> prompt_length = len(tokenizer.decode(inputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
|
||||
>>> outputs = model.generate(inputs, max_length=250, do_sample=True, top_p=0.95, top_k=60)
|
||||
>>> generated = prompt + tokenizer.decode(outputs[0])[prompt_length:]
|
||||
|
||||
.. code-block::
|
||||
|
||||
print(generated)
|
||||
|
||||
Text generation is currently possible with *GPT-2*, *OpenAI-GPT*, *CTRL*, *XLNet*, *Transfo-XL* and *Reformer* in PyTorch, and for most models in TensorFlow as well. As can be seen in the example above, *XLNet* and *Transfo-XL* often need to be padded to work well.
GPT-2 is usually a good choice for *open-ended text generation* because it was trained on millions of webpages with a causal language modeling objective.
|
||||
|
||||
For more information on how to apply different decoding strategies for text generation, please also refer to our generation blog post `here <https://huggingface.co/blog/how-to-generate>`_.
|
||||
|
||||
|
||||
Named Entity Recognition
|
||||
----------------------------------------------------
|
||||
|
||||
Named Entity Recognition (NER) is the task of classifying tokens according to a class, for example identifying a
|
||||
token as a person, an organisation or a location.
|
||||
An example of a named entity recognition dataset is the CoNLL-2003 dataset, which is entirely based on that task.
|
||||
If you would like to fine-tune a model on an NER task, you may leverage the `ner/run_ner.py` (PyTorch),
|
||||
`ner/run_pl_ner.py` (leveraging pytorch-lightning) or the `ner/run_tf_ner.py` (TensorFlow) scripts.
|
||||
|
||||
Here is an example of using the pipelines to do named entity recognition, trying to identify tokens as belonging to one
|
||||
of 9 classes:
|
||||
|
||||
- O, Outside of a named entity
|
||||
- B-MISC, Beginning of a miscellaneous entity right after another miscellaneous entity
- I-MISC, Miscellaneous entity
|
||||
- B-PER, Beginning of a person's name right after another person's name
|
||||
- I-PER, Person's name
|
||||
- B-ORG, Beginning of an organisation right after another organisation
|
||||
- I-ORG, Organisation
|
||||
- B-LOC, Beginning of a location right after another location
|
||||
- I-LOC, Location
|
||||
|
||||
It leverages a fine-tuned model on CoNLL-2003, fine-tuned by `@stefan-it <https://github.com/stefan-it>`__ from
|
||||
`dbmdz <https://github.com/dbmdz>`__.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> nlp = pipeline("ner")
|
||||
|
||||
>>> sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \
... "close to the Manhattan Bridge which is visible from the window."
|
||||
|
||||
|
||||
This outputs a list of all words that have been identified as an entity from the 9 classes defined above. Here are the
|
||||
expected results:
|
||||
|
||||
.. code-block::
|
||||
|
||||
print(nlp(sequence))
|
||||
|
||||
[
|
||||
{'word': 'Hu', 'score': 0.9995632767677307, 'entity': 'I-ORG'},
|
||||
{'word': '##gging', 'score': 0.9915938973426819, 'entity': 'I-ORG'},
|
||||
{'word': 'Face', 'score': 0.9982671737670898, 'entity': 'I-ORG'},
|
||||
{'word': 'Inc', 'score': 0.9994403719902039, 'entity': 'I-ORG'},
|
||||
{'word': 'New', 'score': 0.9994346499443054, 'entity': 'I-LOC'},
|
||||
{'word': 'York', 'score': 0.9993270635604858, 'entity': 'I-LOC'},
|
||||
{'word': 'City', 'score': 0.9993864893913269, 'entity': 'I-LOC'},
|
||||
{'word': 'D', 'score': 0.9825621843338013, 'entity': 'I-LOC'},
|
||||
{'word': '##UM', 'score': 0.936983048915863, 'entity': 'I-LOC'},
|
||||
{'word': '##BO', 'score': 0.8987102508544922, 'entity': 'I-LOC'},
|
||||
{'word': 'Manhattan', 'score': 0.9758241176605225, 'entity': 'I-LOC'},
|
||||
{'word': 'Bridge', 'score': 0.990249514579773, 'entity': 'I-LOC'}
|
||||
]
|
||||
|
||||
Note how the words "Hugging Face" have been identified as an organisation, and "New York City", "DUMBO" and
|
||||
"Manhattan Bridge" have been identified as locations.
|
||||
|
||||
Here is an example doing named entity recognition using a model and a tokenizer. The process is the following:
|
||||
|
||||
- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and is
loaded with the weights stored in the checkpoint.
|
||||
- Define the label list with which the model was trained.
|
||||
- Define a sequence with known entities, such as "Hugging Face" as an organisation and "New York City" as a location.
|
||||
- Split words into tokens so that they can be mapped to the predictions. We use a small hack by first completely
|
||||
encoding and decoding the sequence, so that we're left with a string that contains the special tokens.
|
||||
- Encode that sequence into IDs (special tokens are added automatically).
|
||||
- Retrieve the predictions by passing the input to the model and getting the first output. This results in a
|
||||
distribution over the 9 possible classes for each token. We take the argmax to retrieve the most likely class
|
||||
for each token.
|
||||
- Zip together each token with its prediction and print it.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoModelForTokenClassification, AutoTokenizer
|
||||
>>> import torch
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
|
||||
>>> label_list = [
|
||||
... "O", # Outside of a named entity
|
||||
... "B-MISC", # Beginning of a miscellaneous entity right after another miscellaneous entity
|
||||
... "I-MISC", # Miscellaneous entity
|
||||
... "B-PER", # Beginning of a person's name right after another person's name
|
||||
... "I-PER", # Person's name
|
||||
... "B-ORG", # Beginning of an organisation right after another organisation
|
||||
... "I-ORG", # Organisation
|
||||
... "B-LOC", # Beginning of a location right after another location
|
||||
... "I-LOC" # Location
|
||||
... ]
|
||||
|
||||
>>> sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \
|
||||
... "close to the Manhattan Bridge."
|
||||
|
||||
>>> # Bit of a hack to get the tokens with the special tokens
|
||||
>>> tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sequence)))
|
||||
>>> inputs = tokenizer.encode(sequence, return_tensors="pt")
|
||||
|
||||
>>> outputs = model(inputs)[0]
|
||||
>>> predictions = torch.argmax(outputs, dim=2)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TFAutoModelForTokenClassification, AutoTokenizer
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
|
||||
>>> label_list = [
|
||||
... "O", # Outside of a named entity
|
||||
... "B-MISC", # Beginning of a miscellaneous entity right after another miscellaneous entity
|
||||
... "I-MISC", # Miscellaneous entity
|
||||
... "B-PER", # Beginning of a person's name right after another person's name
|
||||
... "I-PER", # Person's name
|
||||
... "B-ORG", # Beginning of an organisation right after another organisation
|
||||
... "I-ORG", # Organisation
|
||||
... "B-LOC", # Beginning of a location right after another location
|
||||
... "I-LOC" # Location
|
||||
... ]
|
||||
|
||||
>>> sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \
|
||||
... "close to the Manhattan Bridge."
|
||||
|
||||
>>> # Bit of a hack to get the tokens with the special tokens
|
||||
>>> tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sequence)))
|
||||
>>> inputs = tokenizer.encode(sequence, return_tensors="tf")
|
||||
|
||||
>>> outputs = model(inputs)[0]
|
||||
>>> predictions = tf.argmax(outputs, axis=2)
|
||||
|
||||
|
||||
This outputs a list of each token mapped to its prediction. Unlike the pipeline, here every token has
a prediction, as we didn't remove the "O" class, which means that no particular entity was found on that token. The
|
||||
following array should be the output:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> print([(token, label_list[prediction]) for token, prediction in zip(tokens, predictions[0].numpy())])
|
||||
[('[CLS]', 'O'), ('Hu', 'I-ORG'), ('##gging', 'I-ORG'), ('Face', 'I-ORG'), ('Inc', 'I-ORG'), ('.', 'O'), ('is', 'O'), ('a', 'O'), ('company', 'O'), ('based', 'O'), ('in', 'O'), ('New', 'I-LOC'), ('York', 'I-LOC'), ('City', 'I-LOC'), ('.', 'O'), ('Its', 'O'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), ('D', 'I-LOC'), ('##UM', 'I-LOC'), ('##BO', 'I-LOC'), (',', 'O'), ('therefore', 'O'), ('very', 'O'), ('##c', 'O'), ('##lose', 'O'), ('to', 'O'), ('the', 'O'), ('Manhattan', 'I-LOC'), ('Bridge', 'I-LOC'), ('.', 'O'), ('[SEP]', 'O')]
|
||||
|
||||
Summarization
|
||||
----------------------------------------------------
|
||||
|
||||
Summarization is the task of summarizing a text / an article into a shorter text.
|
||||
|
||||
An example of a summarization dataset is the CNN / Daily Mail dataset, which consists of long news articles and was created for the task of summarization.
|
||||
If you would like to fine-tune a model on a summarization task, you may leverage the ``examples/summarization/bart/run_train.sh`` (leveraging pytorch-lightning) script.
|
||||
|
||||
Here is an example of using the pipelines to do summarization.
|
||||
It leverages a Bart model that was fine-tuned on the CNN / Daily Mail data set.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> summarizer = pipeline("summarization")
|
||||
|
||||
>>> ARTICLE = """ New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York.
|
||||
... A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.
|
||||
... Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other.
|
||||
... In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage.
|
||||
... Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the
|
||||
... 2010 marriage license application, according to court documents.
|
||||
... Prosecutors said the marriages were part of an immigration scam.
|
||||
... On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further.
|
||||
... After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective
|
||||
... Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.
|
||||
... All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.
|
||||
... Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.
|
||||
... Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.
|
||||
... The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s
|
||||
... Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.
|
||||
... Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.
|
||||
... If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.
|
||||
... """
|
||||
|
||||
Because the summarization pipeline depends on the ``PreTrainedModel.generate()`` method, we can override the default arguments
of ``PreTrainedModel.generate()`` directly in the pipeline, as is shown below for ``max_length`` and ``min_length``.
|
||||
This outputs the following summary:
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> print(summarizer(ARTICLE, max_length=130, min_length=30, do_sample=False))
|
||||
[{'summary_text': 'Liana Barrientos, 39, is charged with two counts of "offering a false instrument for filing in the first degree" In total, she has been married 10 times, with nine of her marriages occurring between 1999 and 2002. She is believed to still be married to four men.'}]
|
||||
|
||||
Here is an example doing summarization using a model and a tokenizer. The process is the following:
|
||||
|
||||
- Instantiate a tokenizer and a model from the checkpoint name. Summarization is usually done using an encoder-decoder model, such as ``Bart`` or ``T5``.
|
||||
- Define the article that should be summarized.
- Leverage the ``PreTrainedModel.generate()`` method.
|
||||
- Add the T5 specific prefix "summarize: ".
|
||||
|
||||
Here Google's T5 model is used, which was only pre-trained on a multi-task mixture dataset (including CNN / Daily Mail) but nevertheless yields very good results.
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoModelWithLMHead, AutoTokenizer
|
||||
|
||||
>>> model = AutoModelWithLMHead.from_pretrained("t5-base")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
|
||||
|
||||
>>> # T5 uses a max_length of 512 so we cut the article to 512 tokens.
|
||||
>>> inputs = tokenizer.encode("summarize: " + ARTICLE, return_tensors="pt", max_length=512)
|
||||
>>> outputs = model.generate(inputs, max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TFAutoModelWithLMHead, AutoTokenizer
|
||||
|
||||
>>> model = TFAutoModelWithLMHead.from_pretrained("t5-base")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
|
||||
|
||||
>>> # T5 uses a max_length of 512 so we cut the article to 512 tokens.
|
||||
>>> inputs = tokenizer.encode("summarize: " + ARTICLE, return_tensors="tf", max_length=512)
|
||||
>>> outputs = model.generate(inputs, max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
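In both frameworks, the generated ids still have to be decoded back into text to obtain the summary; a minimal sketch (``skip_special_tokens`` drops padding and end-of-sequence tokens):

.. code-block::

    # PyTorch output: a torch.Tensor of token ids
    print(tokenizer.decode(outputs[0].tolist(), skip_special_tokens=True))

    # TensorFlow output: convert the row of ids to a plain Python list first
    print(tokenizer.decode(outputs[0].numpy().tolist(), skip_special_tokens=True))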
|
||||
|
||||
Translation
|
||||
----------------------------------------------------
|
||||
|
||||
Translation is the task of translating a text from one language to another.
|
||||
|
||||
An example of a translation dataset is the WMT English to German dataset, which has English sentences as the input data
|
||||
and German sentences as the target data.
|
||||
|
||||
Here is an example of using the pipelines to do translation.
|
||||
It leverages a T5 model that was only pre-trained on a multi-task mixture dataset (including WMT), but yields impressive
|
||||
translation results nevertheless.
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> translator = pipeline("translation_en_to_de")
|
||||
>>> print(translator("Hugging Face is a technology company based in New York and Paris", max_length=40))
|
||||
[{'translation_text': 'Hugging Face ist ein Technologieunternehmen mit Sitz in New York und Paris.'}]
|
||||
|
||||
Because the translation pipeline depends on the ``PreTrainedModel.generate()`` method, we can override the default arguments
of ``PreTrainedModel.generate()`` directly in the pipeline, as is shown for ``max_length`` above.
|
||||
This outputs the following translation into German:
|
||||
|
||||
::
|
||||
|
||||
Hugging Face ist ein Technologieunternehmen mit Sitz in New York und Paris.
|
||||
|
||||
Here is an example doing translation using a model and a tokenizer. The process is the following:
|
||||
|
||||
- Instantiate a tokenizer and a model from the checkpoint name. Translation is usually done using an encoder-decoder model, such as ``Bart`` or ``T5``.
- Define the text that should be translated.
- Leverage the ``PreTrainedModel.generate()`` method.
|
||||
- Add the T5 specific prefix "translate English to German: "
|
||||
|
||||
.. code-block::
|
||||
|
||||
>>> ## PYTORCH CODE
|
||||
>>> from transformers import AutoModelWithLMHead, AutoTokenizer
|
||||
|
||||
>>> model = AutoModelWithLMHead.from_pretrained("t5-base")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
|
||||
|
||||
>>> inputs = tokenizer.encode("translate English to German: Hugging Face is a technology company based in New York and Paris", return_tensors="pt")
|
||||
>>> outputs = model.generate(inputs, max_length=40, num_beams=4, early_stopping=True)
|
||||
|
||||
>>> print(outputs)
|
||||
tensor([[ 0, 11560, 3896, 8881, 229, 236, 3, 14366, 15377, 181,
|
||||
11216, 16, 368, 1060, 64, 1919, 5]])
|
||||
>>> ## TENSORFLOW CODE
|
||||
>>> from transformers import TFAutoModelWithLMHead, AutoTokenizer
|
||||
|
||||
>>> model = TFAutoModelWithLMHead.from_pretrained("t5-base")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
|
||||
|
||||
>>> inputs = tokenizer.encode("translate English to German: Hugging Face is a technology company based in New York and Paris", return_tensors="tf")
|
||||
>>> outputs = model.generate(inputs, max_length=40, num_beams=4, early_stopping=True)
|
||||
|
||||
>>> print(outputs)
|
||||
tf.Tensor(
|
||||
[[ 0 11560 3896 8881 229 236 3 14366 15377 181 11216 16
|
||||
368 1060 64 1919 5]], shape=(1, 17), dtype=int32)
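To turn these ids back into readable text, decode them with the tokenizer; a minimal sketch (decoding should recover the German sentence shown by the pipeline above):

.. code-block::

    # PyTorch output
    print(tokenizer.decode(outputs[0].tolist(), skip_special_tokens=True))
    # TensorFlow output
    print(tokenizer.decode(outputs[0].numpy().tolist(), skip_special_tokens=True))
    # Expected: Hugging Face ist ein Technologieunternehmen mit Sitz in New York und Paris.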
|
||||
@ -12,7 +12,7 @@ According to Pytorch's documentation: "TorchScript is a way to create serializab
|
||||
Pytorch's two modules `JIT and TRACE <https://pytorch.org/docs/stable/jit.html>`_ allow the developer to export
|
||||
their model to be re-used in other programs, such as efficiency-oriented C++ programs.
|
||||
|
||||
We have provided an interface that allows the export of `transformers` models to TorchScript so that they can
|
||||
We have provided an interface that allows the export of 🤗 Transformers models to TorchScript so that they can
|
||||
be reused in a different environment than a Pytorch-based python program. Here we explain how to use our models so that
|
||||
they can be exported, and what to be mindful of when using these models with TorchScript.
|
||||
|
||||
|
||||
323
docs/source/training.rst
Normal file
@ -0,0 +1,323 @@
|
||||
Training and fine-tuning
|
||||
========================
|
||||
|
||||
Model classes in 🤗 Transformers are designed to be compatible with native
|
||||
PyTorch and TensorFlow 2 and can be used seamlessly with either. In this
|
||||
quickstart, we will show how to fine-tune (or train from scratch) a model
|
||||
using the standard training tools available in either framework. We will also
|
||||
show how to use our included :func:`~transformers.Trainer` class which
|
||||
handles much of the complexity of training for you.
|
||||
|
||||
This guide assumes that you are already familiar with loading and using our
|
||||
models for inference; otherwise, see the :doc:`task summary <task_summary>`. We also assume
|
||||
that you are familiar with training deep neural networks in either PyTorch or
|
||||
TF2, and focus specifically on the nuances and tools for training models in
|
||||
🤗 Transformers.
|
||||
|
||||
Sections:
|
||||
|
||||
* :ref:`pytorch`
|
||||
* :ref:`tensorflow`
|
||||
* :ref:`trainer`
|
||||
* :ref:`additional-resources`
|
||||
|
||||
.. _pytorch:
|
||||
|
||||
Fine-tuning in native PyTorch
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Model classes in 🤗 Transformers that don't begin with ``TF`` are
|
||||
`PyTorch Modules <https://pytorch.org/docs/master/generated/torch.nn.Module.html>`_,
|
||||
meaning that you can use them just as you would any model in PyTorch for
|
||||
both inference and optimization.
|
||||
|
||||
Let's consider the common task of fine-tuning a masked language model like
|
||||
BERT on a sequence classification dataset. When we instantiate a model with
|
||||
:func:`~transformers.PreTrainedModel.from_pretrained`, the model
|
||||
configuration and pre-trained weights
|
||||
of the specified model are used to initialize the model. The
|
||||
library also includes a number of task-specific final layers or 'heads' whose
|
||||
weights are instantiated randomly when not present in the specified
|
||||
pre-trained model. For example, instantiating a model with
|
||||
``BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)``
|
||||
will create a BERT model instance with encoder weights copied from the
|
||||
``bert-base-uncased`` model and a randomly initialized sequence
|
||||
classification head on top of the encoder with an output size of 2. Models
|
||||
are initialized in ``eval`` mode by default. We can call ``model.train()`` to
|
||||
put it in train mode.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import BertForSequenceClassification
|
||||
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
|
||||
model.train()
|
||||
|
||||
This is useful because it allows us to make use of the pre-trained BERT
|
||||
encoder and easily train it on whatever sequence classification dataset we
|
||||
choose. We can use any PyTorch optimizer, but our library also provides the
|
||||
:func:`~transformers.AdamW` optimizer which implements gradient bias
|
||||
correction as well as weight decay.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import AdamW
|
||||
optimizer = AdamW(model.parameters(), lr=1e-5)
|
||||
|
||||
The optimizer allows us to apply different hyperparameters for specific
|
||||
parameter groups. For example, we can apply weight decay to all parameters
|
||||
other than bias and layer normalization terms:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
no_decay = ['bias', 'LayerNorm.weight']
|
||||
optimizer_grouped_parameters = [
|
||||
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
|
||||
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
|
||||
]
|
||||
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)
|
||||
|
||||
Now we can set up a simple dummy training batch using
|
||||
:func:`~transformers.PreTrainedTokenizer.__call__`. This returns a
|
||||
:func:`~transformers.BatchEncoding` instance which
|
||||
prepares everything we might need to pass to the model.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import BertTokenizer
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
||||
text_batch = ["I love Pixar.", "I don't care for Pixar."]
|
||||
encoding = tokenizer(text_batch, return_tensors='pt', padding=True, truncation=True)
|
||||
input_ids = encoding['input_ids']
|
||||
attention_mask = encoding['attention_mask']
|
||||
|
||||
When we call a classification model with the ``labels`` argument, the first
|
||||
returned element is the Cross Entropy loss between the predictions and the
|
||||
passed labels. Having already set up our optimizer, we can then do a
|
||||
backwards pass and update the weights:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import torch

labels = torch.tensor([1,0]).unsqueeze(0)
|
||||
outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
|
||||
loss = outputs[0]
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
Alternatively, you can just get the logits and calculate the loss yourself.
|
||||
The following is equivalent to the previous example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import torch
from torch.nn import functional as F
|
||||
labels = torch.tensor([1,0])
|
||||
outputs = model(input_ids, attention_mask=attention_mask)
|
||||
loss = F.cross_entropy(outputs[0], labels)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
Of course, you can train on GPU by calling ``to('cuda')`` on the model and
|
||||
inputs as usual.
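For example, a minimal sketch of the device handling, reusing the tensors defined above:

.. code-block:: python

    import torch

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)

    # every tensor in the batch must live on the same device as the model
    input_ids = input_ids.to(device)
    attention_mask = attention_mask.to(device)
    labels = labels.to(device)

    outputs = model(input_ids, attention_mask=attention_mask, labels=labels)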
|
||||
|
||||
We also provide a few learning rate scheduling tools. With the following, we
|
||||
can set up a scheduler which warms up for ``num_warmup_steps`` and then
|
||||
linearly decays to 0 by the end of training.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import get_linear_schedule_with_warmup
|
||||
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_train_steps)
|
||||
|
||||
Then all we have to do is call ``scheduler.step()`` after ``optimizer.step()``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
...
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
|
||||
We highly recommend using :func:`~transformers.Trainer`, discussed below,
|
||||
which conveniently handles the moving parts of training 🤗 Transformers models
|
||||
with features like mixed precision and easy tensorboard logging.
|
||||
|
||||
|
||||
Freezing the encoder
|
||||
--------------------
|
||||
|
||||
In some cases, you might be interested in keeping the weights of the
|
||||
pre-trained encoder frozen and optimizing only the weights of the head
|
||||
layers. To do so, simply set the ``requires_grad`` attribute to ``False`` on
|
||||
the encoder parameters, which can be accessed with the ``base_model``
|
||||
submodule on any task-specific model in the library:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
for param in model.base_model.parameters():
|
||||
param.requires_grad = False
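If you then build the optimizer, you can restrict it to the parameters that are still trainable; a minimal sketch:

.. code-block:: python

    from transformers import AdamW

    # only the randomly initialized head parameters will be updated
    optimizer = AdamW(
        [p for p in model.parameters() if p.requires_grad],
        lr=1e-5,
    )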
|
||||
|
||||
|
||||
.. _tensorflow:
|
||||
|
||||
Fine-tuning in native TensorFlow 2
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Models can also be trained natively in TensorFlow 2. Just as with PyTorch,
|
||||
TensorFlow models can be instantiated with
|
||||
:func:`~transformers.PreTrainedModel.from_pretrained` to load the weights of
|
||||
the encoder from a pretrained model.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import TFBertForSequenceClassification
|
||||
model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased')
|
||||
|
||||
Let's use ``tensorflow_datasets`` to load in the `MRPC dataset
|
||||
<https://www.tensorflow.org/datasets/catalog/glue#gluemrpc>`_ from GLUE. We
|
||||
can then use our built-in
|
||||
:func:`~transformers.data.processors.glue.glue_convert_examples_to_features`
|
||||
to tokenize MRPC and convert it to a TensorFlow ``Dataset`` object. Note that
|
||||
tokenizers are framework-agnostic, so there is no need to prepend ``TF`` to
|
||||
the pretrained tokenizer name.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import BertTokenizer, glue_convert_examples_to_features
|
||||
import tensorflow_datasets as tfds
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
||||
data = tfds.load('glue/mrpc')
|
||||
train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=128, task='mrpc')
|
||||
train_dataset = train_dataset.shuffle(100).batch(32).repeat(2)
|
||||
|
||||
The model can then be compiled and trained as any Keras model:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5)
|
||||
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
|
||||
model.compile(optimizer=optimizer, loss=loss)
|
||||
model.fit(train_dataset, epochs=2, steps_per_epoch=115)
|
||||
|
||||
With the tight interoperability between TensorFlow and PyTorch models, you
|
||||
can even save the model and then reload it as a PyTorch model (or vice-versa):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from transformers import BertForSequenceClassification
|
||||
model.save_pretrained('./my_mrpc_model/')
|
||||
pytorch_model = BertForSequenceClassification.from_pretrained('./my_mrpc_model/', from_tf=True)
|
||||
|
||||
|
||||
.. _trainer:
|
||||
|
||||
Trainer
|
||||
^^^^^^^
|
||||
|
||||
We also provide a simple but feature-complete training and evaluation
|
||||
interface through :func:`~transformers.Trainer` and
|
||||
:func:`~transformers.TFTrainer`. You can train, fine-tune,
|
||||
and evaluate any 🤗 Transformers model with a wide range of training options and
|
||||
with built-in features like logging, gradient accumulation, and mixed
|
||||
precision.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
## PYTORCH CODE
|
||||
from transformers import BertForSequenceClassification, Trainer, TrainingArguments
|
||||
|
||||
model = BertForSequenceClassification.from_pretrained("bert-large-uncased")
|
||||
|
||||
training_args = TrainingArguments(
|
||||
output_dir='./results', # output directory
|
||||
num_train_epochs=3, # total # of training epochs
|
||||
per_device_train_batch_size=16, # batch size per device during training
|
||||
per_device_eval_batch_size=64, # batch size for evaluation
|
||||
warmup_steps=500, # number of warmup steps for learning rate scheduler
|
||||
weight_decay=0.01, # strength of weight decay
|
||||
logging_dir='./logs', # directory for storing logs
|
||||
)
|
||||
|
||||
trainer = Trainer(
|
||||
model=model, # the instantiated 🤗 Transformers model to be trained
|
||||
args=training_args, # training arguments, defined above
|
||||
train_dataset=train_dataset, # training dataset
|
||||
eval_dataset=test_dataset # evaluation dataset
|
||||
)
|
||||
## TENSORFLOW CODE
|
||||
from transformers import TFBertForSequenceClassification, TFTrainer, TFTrainingArguments
|
||||
|
||||
model = TFBertForSequenceClassification.from_pretrained("bert-large-uncased")
|
||||
|
||||
training_args = TFTrainingArguments(
|
||||
output_dir='./results', # output directory
|
||||
num_train_epochs=3, # total # of training epochs
|
||||
per_device_train_batch_size=16, # batch size per device during training
|
||||
per_device_eval_batch_size=64, # batch size for evaluation
|
||||
warmup_steps=500, # number of warmup steps for learning rate scheduler
|
||||
weight_decay=0.01, # strength of weight decay
|
||||
logging_dir='./logs', # directory for storing logs
|
||||
)
|
||||
|
||||
trainer = TFTrainer(
|
||||
model=model, # the instantiated 🤗 Transformers model to be trained
|
||||
args=training_args, # training arguments, defined above
|
||||
train_dataset=tfds_train_dataset, # tensorflow_datasets training dataset
|
||||
eval_dataset=tfds_test_dataset # tensorflow_datasets evaluation dataset
|
||||
)
|
||||
|
||||
Now simply call ``trainer.train()`` to train and ``trainer.evaluate()`` to
|
||||
evaluate. You can use your own module as well, but the first
|
||||
argument returned from ``forward`` must be the loss which you wish to
|
||||
optimize.
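As an illustration only (``MyClassifier`` and its head are hypothetical, not part of the library), a custom module that satisfies this contract could look like the following sketch:

.. code-block:: python

    from torch import nn

    class MyClassifier(nn.Module):
        """Hypothetical module: ``forward`` returns the loss first, then the logits."""

        def __init__(self, encoder, num_labels=2):
            super().__init__()
            self.encoder = encoder  # e.g. a pre-trained BertModel
            self.head = nn.Linear(encoder.config.hidden_size, num_labels)

        def forward(self, input_ids=None, attention_mask=None, labels=None):
            hidden_states = self.encoder(input_ids, attention_mask=attention_mask)[0]
            logits = self.head(hidden_states[:, 0])  # classify from the first ([CLS]) token
            loss = nn.functional.cross_entropy(logits, labels)
            return loss, logits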
|
||||
|
||||
:func:`~transformers.Trainer` uses a built-in default function to collate
|
||||
batches and prepare them to be fed into the model. If needed, you can also
|
||||
use the ``data_collator`` argument to pass your own collator function which
|
||||
takes in the data in the format provided by your dataset and returns a
|
||||
batch ready to be fed into the model. Note that
|
||||
:func:`~transformers.TFTrainer` expects the passed datasets to be dataset
|
||||
objects from ``tensorflow_datasets``.
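As a sketch only (the tuple format below is hypothetical, just to illustrate the expected return value), a custom collator could look like this:

.. code-block:: python

    import torch

    def my_data_collator(features):
        # ``features`` is a list of dataset items, here assumed to be
        # (input_ids, attention_mask, label) tuples
        input_ids, attention_masks, labels = zip(*features)
        return {
            "input_ids": torch.stack(input_ids),
            "attention_mask": torch.stack(attention_masks),
            "labels": torch.tensor(labels),
        }

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=my_data_collator,
    )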
|
||||
|
||||
To calculate metrics in addition to the loss, you can also define
|
||||
your own ``compute_metrics`` function and pass it to the trainer.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
|
||||
|
||||
def compute_metrics(pred):
|
||||
labels = pred.label_ids
|
||||
preds = pred.predictions.argmax(-1)
|
||||
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
|
||||
acc = accuracy_score(labels, preds)
|
||||
return {
|
||||
'accuracy': acc,
|
||||
'f1': f1,
|
||||
'precision': precision,
|
||||
'recall': recall
|
||||
}
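The function is then passed to the trainer when it is instantiated, for example (reusing the ``model``, ``training_args`` and datasets defined above):

.. code-block:: python

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=test_dataset,
        compute_metrics=compute_metrics,
    )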
|
||||
|
||||
Finally, you can view the results, including any calculated metrics, by
|
||||
launching tensorboard in your specified ``logging_dir`` directory.
|
||||
|
||||
|
||||
.. _additional-resources:
|
||||
|
||||
Additional resources
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* `A lightweight colab demo
|
||||
<https://colab.research.google.com/drive/1-JIJlao4dI-Ilww_NnTc0rxtp-ymgDgM?usp=sharing>`_
|
||||
which uses ``Trainer`` for IMDb sentiment classification.
|
||||
|
||||
* `🤗 Transformers Examples <https://github.com/huggingface/transformers/tree/master/examples>`_
|
||||
including scripts for training and fine-tuning on GLUE, SQuAD, and
|
||||
several other tasks.
|
||||
|
||||
* `How to train a language model
|
||||
<https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb>`_,
|
||||
a detailed colab notebook which uses ``Trainer`` to train a masked
|
||||
language model from scratch on Esperanto.
|
||||
|
||||
* `🤗 Transformers Notebooks <./notebooks.html>`_ which contain dozens
|
||||
of example notebooks from the community for training and using
|
||||
🤗 Transformers on a variety of tasks.
|
||||
@ -1,392 +1,80 @@
|
||||
# Examples
|
||||
## Examples
|
||||
|
||||
In this section a few examples are put together. All of these examples work for several models, making use of the very
|
||||
similar API between the different models.
|
||||
Version 2.9 of 🤗 Transformers introduces a new [`Trainer`](https://github.com/huggingface/transformers/blob/master/src/transformers/trainer.py) class for PyTorch, and its equivalent [`TFTrainer`](https://github.com/huggingface/transformers/blob/master/src/transformers/trainer_tf.py) for TF 2.
|
||||
Running the examples requires PyTorch 1.3.1+ or TensorFlow 2.1+.
|
||||
|
||||
| Section | Description |
|
||||
|----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Language Model fine-tuning](#language-model-fine-tuning) | Fine-tuning the library models for language modeling on a text dataset. Causal language modeling for GPT/GPT-2, masked language modeling for BERT/RoBERTa. |
|
||||
| [Language Generation](#language-generation) | Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL and XLNet. |
|
||||
| [GLUE](#glue) | Examples running BERT/XLM/XLNet/RoBERTa on the 9 GLUE tasks. Examples feature distributed training as well as half-precision. |
|
||||
| [SQuAD](#squad) | Using BERT for question answering, examples with distributed training. |
|
||||
| [Multiple Choice](#multiple-choice) | Examples running BERT/XLNet/RoBERTa on the SWAG/RACE/ARC tasks. |
|
||||
Here is the list of all our examples:
|
||||
- **grouped by task** (all official examples work for multiple models)
|
||||
- with information on whether they are **built on top of `Trainer`/`TFTrainer`** (if not, they still work, they might just lack some features),
|
||||
- whether they also include examples for **`pytorch-lightning`**, which is a great fully-featured, general-purpose training library for PyTorch,
|
||||
- links to **Colab notebooks** to walk through the scripts and run them easily,
|
||||
- links to **Cloud deployments** to be able to deploy large-scale trainings in the Cloud with little to no setup.
|
||||
|
||||
## Language model fine-tuning
|
||||
This is still a work-in-progress – in particular documentation is still sparse – so please **contribute improvements/pull requests.**
|
||||
|
||||
Based on the script [`run_lm_finetuning.py`](https://github.com/huggingface/transformers/blob/master/examples/run_lm_finetuning.py).
|
||||
|
||||
Fine-tuning the library models for language modeling on a text dataset for GPT, GPT-2, BERT and RoBERTa (DistilBERT
|
||||
to be added soon). GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa
|
||||
are fine-tuned using a masked language modeling (MLM) loss.
|
||||
# The Big Table of Tasks
|
||||
|
||||
Before running the following example, you should get a file that contains text on which the language model will be
|
||||
fine-tuned. A good example of such text is the [WikiText-2 dataset](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/).
|
||||
| Task | Example datasets | Trainer support | TFTrainer support | pytorch-lightning | Colab
|
||||
|---|---|:---:|:---:|:---:|:---:|
|
||||
| [**`language-modeling`**](https://github.com/huggingface/transformers/tree/master/examples/language-modeling) | Raw text | ✅ | - | - | [](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/01_how_to_train.ipynb)
|
||||
| [**`text-classification`**](https://github.com/huggingface/transformers/tree/master/examples/text-classification) | GLUE, XNLI | ✅ | ✅ | ✅ | [](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/trainer/01_text_classification.ipynb)
|
||||
| [**`token-classification`**](https://github.com/huggingface/transformers/tree/master/examples/token-classification) | CoNLL NER | ✅ | ✅ | ✅ | -
|
||||
| [**`multiple-choice`**](https://github.com/huggingface/transformers/tree/master/examples/multiple-choice) | SWAG, RACE, ARC | ✅ | ✅ | - | [](https://colab.research.google.com/github/ViktorAlm/notebooks/blob/master/MPC_GPU_Demo_for_TF_and_PT.ipynb)
|
||||
| [**`question-answering`**](https://github.com/huggingface/transformers/tree/master/examples/question-answering) | SQuAD | - | ✅ | - | -
|
||||
| [**`text-generation`**](https://github.com/huggingface/transformers/tree/master/examples/text-generation) | - | n/a | n/a | n/a | [](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/02_how_to_generate.ipynb)
|
||||
| [**`distillation`**](https://github.com/huggingface/transformers/tree/master/examples/distillation) | All | - | - | - | -
|
||||
| [**`summarization`**](https://github.com/huggingface/transformers/tree/master/examples/seq2seq) | CNN/Daily Mail | - | - | ✅ | -
|
||||
| [**`translation`**](https://github.com/huggingface/transformers/tree/master/examples/seq2seq) | WMT | - | - | ✅ | -
|
||||
| [**`bertology`**](https://github.com/huggingface/transformers/tree/master/examples/bertology) | - | - | - | - | -
|
||||
| [**`adversarial`**](https://github.com/huggingface/transformers/tree/master/examples/adversarial) | HANS | ✅ | - | - | -
|
||||
|
||||
We will refer to two different files: `$TRAIN_FILE`, which contains text for training, and `$TEST_FILE`, which contains
|
||||
text that will be used for evaluation.
|
||||
|
||||
### GPT-2/GPT and causal language modeling
|
||||
<br>
|
||||
|
||||
The following example fine-tunes GPT-2 on WikiText-2. We're using the raw WikiText-2 (no tokens were replaced before
|
||||
the tokenization). The loss here is that of causal language modeling.
|
||||
## Important note
|
||||
|
||||
**Important**
|
||||
To make sure you can successfully run the latest versions of the example scripts, you have to install the library from source and install some example-specific requirements.
|
||||
Execute the following steps in a new virtual environment:
|
||||
|
||||
```bash
|
||||
export TRAIN_FILE=/path/to/dataset/wiki.train.raw
|
||||
export TEST_FILE=/path/to/dataset/wiki.test.raw
|
||||
|
||||
python run_lm_finetuning.py \
|
||||
--output_dir=output \
|
||||
--model_type=gpt2 \
|
||||
--model_name_or_path=gpt2 \
|
||||
--do_train \
|
||||
--train_data_file=$TRAIN_FILE \
|
||||
--do_eval \
|
||||
--eval_data_file=$TEST_FILE
|
||||
git clone https://github.com/huggingface/transformers
|
||||
cd transformers
|
||||
pip install .
|
||||
pip install -r ./examples/requirements.txt
|
||||
```
|
||||
|
||||
This takes about half an hour to train on a single K80 GPU and about one minute for the evaluation to run. It reaches
|
||||
a score of ~20 perplexity once fine-tuned on the dataset.
|
||||
## One-click Deploy to Cloud (wip)
|
||||
|
||||
### RoBERTa/BERT and masked language modeling
|
||||
#### Azure
|
||||
|
||||
The following example fine-tunes RoBERTa on WikiText-2. Here too, we're using the raw WikiText-2. The loss is different
|
||||
as BERT/RoBERTa have a bidirectional mechanism; we're therefore using the same loss that was used during their
|
||||
pre-training: masked language modeling.
|
||||
[](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2F101-storage-account-create%2Fazuredeploy.json)
|
||||
|
||||
In accordance with the RoBERTa paper, we use dynamic masking rather than static masking. The model may, therefore, converge
|
||||
slightly slower (over-fitting takes more epochs).
|
||||
## Running on TPUs
|
||||
|
||||
We use the `--mlm` flag so that the script may change its loss function.
|
||||
When using Tensorflow, TPUs are supported out of the box as a `tf.distribute.Strategy`.
|
||||
|
||||
When using PyTorch, we support TPUs thanks to `pytorch/xla`. For more context and information on how to set up your TPU environment, refer to Google's documentation and to the
|
||||
very detailed [pytorch/xla README](https://github.com/pytorch/xla/blob/master/README.md).
|
||||
|
||||
In this repo, we provide a very simple launcher script named [xla_spawn.py](https://github.com/huggingface/transformers/tree/master/examples/xla_spawn.py) that lets you run our example scripts on multiple TPU cores without any boilerplate.
|
||||
Just pass a `--num_cores` flag to this script, then your regular training script with its arguments (this is similar to the `torch.distributed.launch` helper for torch.distributed).
|
||||
|
||||
For example for `run_glue`:
|
||||
|
||||
```bash
|
||||
export TRAIN_FILE=/path/to/dataset/wiki.train.raw
|
||||
export TEST_FILE=/path/to/dataset/wiki.test.raw
|
||||
|
||||
python run_lm_finetuning.py \
|
||||
--output_dir=output \
|
||||
--model_type=roberta \
|
||||
--model_name_or_path=roberta-base \
|
||||
--do_train \
|
||||
--train_data_file=$TRAIN_FILE \
|
||||
--do_eval \
|
||||
--eval_data_file=$TEST_FILE \
|
||||
--mlm
|
||||
python examples/xla_spawn.py --num_cores 8 \
|
||||
examples/text-classification/run_glue.py \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--task_name mnli \
|
||||
--data_dir ./data/glue_data/MNLI \
|
||||
--output_dir ./models/tpu \
|
||||
--overwrite_output_dir \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--num_train_epochs 1 \
|
||||
--save_steps 20000
|
||||
```
|
||||
|
||||
## Language generation
|
||||
|
||||
Based on the script [`run_generation.py`](https://github.com/huggingface/transformers/blob/master/examples/run_generation.py).
|
||||
|
||||
Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL and XLNet.
|
||||
A similar script is used for our official demo [Write With Transformer](https://transformer.huggingface.co), where you
|
||||
can try out the different models available in the library.
|
||||
|
||||
Example usage:
|
||||
|
||||
```bash
|
||||
python run_generation.py \
|
||||
--model_type=gpt2 \
|
||||
--model_name_or_path=gpt2
|
||||
```
|
||||
|
||||
## GLUE
|
||||
|
||||
Based on the script [`run_glue.py`](https://github.com/huggingface/transformers/blob/master/examples/run_glue.py).
|
||||
|
||||
Fine-tuning the library models for sequence classification on the GLUE benchmark: [General Language Understanding
|
||||
Evaluation](https://gluebenchmark.com/). This script can fine-tune the following models: BERT, XLM, XLNet and RoBERTa.
|
||||
|
||||
GLUE is made up of a total of 9 different tasks. We get the following results on the dev set of the benchmark with an
|
||||
uncased BERT base model (the checkpoint `bert-base-uncased`). All experiments ran on 8 V100 GPUs with a total train
|
||||
batch size of 24. Some of these tasks have a small dataset and training can lead to high variance in the results
|
||||
between different runs. We report the median over 5 runs (with different seeds) for each of the metrics.
|
||||
|
||||
| Task | Metric | Result |
|
||||
|-------|------------------------------|-------------|
|
||||
| CoLA | Matthew's corr | 48.87 |
|
||||
| SST-2 | Accuracy | 91.74 |
|
||||
| MRPC | F1/Accuracy | 90.70/86.27 |
|
||||
| STS-B | Pearson/Spearman corr. | 91.39/91.04 |
|
||||
| QQP | Accuracy/F1 | 90.79/87.66 |
|
||||
| MNLI | Matched acc./Mismatched acc. | 83.70/84.83 |
|
||||
| QNLI | Accuracy | 89.31 |
|
||||
| RTE | Accuracy | 71.43 |
|
||||
| WNLI | Accuracy | 43.66 |
|
||||
|
||||
Some of these results are significantly different from the ones reported on the test set
|
||||
of the GLUE benchmark on the website. For QQP and WNLI, please refer to [FAQ #12](https://gluebenchmark.com/faq) on the website.
|
||||
|
||||
Before running any of these GLUE tasks, you should download the
|
||||
[GLUE data](https://gluebenchmark.com/tasks) by running
|
||||
[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
|
||||
and unpack it to some directory `$GLUE_DIR`.
|
||||
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue
|
||||
export TASK_NAME=MRPC
|
||||
|
||||
python run_glue.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--task_name $TASK_NAME \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir $GLUE_DIR/$TASK_NAME \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_train_batch_size 32 \
|
||||
--learning_rate 2e-5 \
|
||||
--num_train_epochs 3.0 \
|
||||
--output_dir /tmp/$TASK_NAME/
|
||||
```
|
||||
|
||||
where task name can be one of CoLA, SST-2, MRPC, STS-B, QQP, MNLI, QNLI, RTE, WNLI.
|
||||
|
||||
The dev set results will be present within the text file `eval_results.txt` in the specified output_dir.
|
||||
In case of MNLI, since there are two separate dev sets (matched and mismatched), there will be a separate
|
||||
output folder called `/tmp/MNLI-MM/` in addition to `/tmp/MNLI/`.
|
||||
|
||||
The code has not been tested with half-precision training with apex on any GLUE task apart from MRPC, MNLI,
|
||||
CoLA, SST-2. The following section provides details on how to run half-precision training with MRPC. With that being
|
||||
said, there shouldn’t be any issues in running half-precision training with the remaining GLUE tasks as well,
|
||||
since the data processor for each task inherits from the base class DataProcessor.
|
||||
|
||||
### MRPC
|
||||
|
||||
#### Fine-tuning example
|
||||
|
||||
The following example fine-tunes BERT on the Microsoft Research Paraphrase Corpus (MRPC) and runs in less
|
||||
than 10 minutes on a single K80 and in 27 seconds (!) on a single Tesla V100 16GB with apex installed.
|
||||
|
||||
Before running any of these GLUE tasks, you should download the
|
||||
[GLUE data](https://gluebenchmark.com/tasks) by running
|
||||
[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
|
||||
and unpack it to some directory `$GLUE_DIR`.
|
||||
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue
|
||||
|
||||
python run_glue.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--task_name MRPC \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir $GLUE_DIR/MRPC/ \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_train_batch_size 32 \
|
||||
--learning_rate 2e-5 \
|
||||
--num_train_epochs 3.0 \
|
||||
--output_dir /tmp/mrpc_output/
|
||||
```
|
||||
|
||||
Our test ran on a few seeds with [the original implementation hyper-
|
||||
parameters](https://github.com/google-research/bert#sentence-and-sentence-pair-classification-tasks) and gave evaluation
|
||||
results between 84% and 88%.
|
||||
|
||||
#### Using Apex and mixed-precision
|
||||
|
||||
Using Apex and 16 bit precision, the fine-tuning on MRPC only takes 27 seconds. First install
|
||||
[apex](https://github.com/NVIDIA/apex), then run the following example:
|
||||
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue
|
||||
|
||||
python run_glue.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--task_name MRPC \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir $GLUE_DIR/MRPC/ \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_train_batch_size 32 \
|
||||
--learning_rate 2e-5 \
|
||||
--num_train_epochs 3.0 \
|
||||
--output_dir /tmp/mrpc_output/ \
|
||||
--fp16
|
||||
```
|
||||
|
||||
#### Distributed training
|
||||
|
||||
Here is an example using distributed training on 8 V100 GPUs. The model used is the BERT whole-word-masking model and it
|
||||
reaches F1 > 92 on MRPC.
|
||||
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue
|
||||
|
||||
python -m torch.distributed.launch \
|
||||
--nproc_per_node 8 run_glue.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--task_name MRPC \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir $GLUE_DIR/MRPC/ \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_train_batch_size 8 \
|
||||
--learning_rate 2e-5 \
|
||||
--num_train_epochs 3.0 \
|
||||
--output_dir /tmp/mrpc_output/
|
||||
```
|
||||
|
||||
Training with these hyper-parameters gave us the following results:
|
||||
|
||||
```bash
|
||||
acc = 0.8823529411764706
|
||||
acc_and_f1 = 0.901702786377709
|
||||
eval_loss = 0.3418912578906332
|
||||
f1 = 0.9210526315789473
|
||||
global_step = 174
|
||||
loss = 0.07231863956341798
|
||||
```
|
||||
|
||||
### MNLI
|
||||
|
||||
The following example uses the BERT-large, uncased, whole-word-masking model and fine-tunes it on the MNLI task.
|
||||
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue
|
||||
|
||||
python -m torch.distributed.launch \
|
||||
--nproc_per_node 8 run_glue.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--task_name mnli \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir $GLUE_DIR/MNLI/ \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_train_batch_size 8 \
|
||||
--learning_rate 2e-5 \
|
||||
--num_train_epochs 3.0 \
|
||||
--output_dir output_dir \
|
||||
```
|
||||
|
||||
The results are the following:
|
||||
|
||||
```bash
|
||||
***** Eval results *****
|
||||
acc = 0.8679706601466992
|
||||
eval_loss = 0.4911287787382479
|
||||
global_step = 18408
|
||||
loss = 0.04755385363816904
|
||||
|
||||
***** Eval results *****
|
||||
acc = 0.8747965825874695
|
||||
eval_loss = 0.45516540421714036
|
||||
global_step = 18408
|
||||
loss = 0.04755385363816904
|
||||
```
|
||||
|
||||
## Multiple Choice
|
||||
|
||||
Based on the script [`run_multiple_choice.py`]().
|
||||
|
||||
#### Fine-tuning on SWAG
|
||||
Download [swag](https://github.com/rowanz/swagaf/tree/master/data) data
|
||||
|
||||
```bash
|
||||
# Training on 4 Tesla V100 (16GB) GPUs
|
||||
export SWAG_DIR=/path/to/swag_data_dir
|
||||
python ./examples/single_model_scripts/run_multiple_choice.py \
|
||||
--model_type roberta \
|
||||
--task_name swag \
|
||||
--model_name_or_path roberta-base \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir $SWAG_DIR \
|
||||
--learning_rate 5e-5 \
|
||||
--num_train_epochs 3 \
|
||||
--max_seq_length 80 \
|
||||
--output_dir models_bert/swag_base \
|
||||
--per_gpu_eval_batch_size=16 \
|
||||
--per_gpu_train_batch_size=16 \
|
||||
--gradient_accumulation_steps 2 \
|
||||
--overwrite_output
|
||||
```
|
||||
Training with the defined hyper-parameters yields the following results:
|
||||
```
|
||||
***** Eval results *****
|
||||
eval_acc = 0.8338998300509847
|
||||
eval_loss = 0.44457291918821606
|
||||
```
|
||||
|
||||
## SQuAD
|
||||
|
||||
Based on the script [`run_squad.py`](https://github.com/huggingface/transformers/blob/master/examples/run_squad.py).
|
||||
|
||||
#### Fine-tuning on SQuAD
|
||||
|
||||
This example code fine-tunes BERT on the SQuAD dataset. It runs in 24 min (with BERT-base) or 68 min (with BERT-large)
|
||||
on a single tesla V100 16GB. The data for SQuAD can be downloaded with the following links and should be saved in a
|
||||
$SQUAD_DIR directory.
|
||||
|
||||
* [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json)
|
||||
* [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json)
|
||||
* [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py)
|
||||
|
||||
```bash
|
||||
export SQUAD_DIR=/path/to/SQUAD
|
||||
|
||||
python run_squad.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--train_file $SQUAD_DIR/train-v1.1.json \
|
||||
--predict_file $SQUAD_DIR/dev-v1.1.json \
|
||||
--per_gpu_train_batch_size 12 \
|
||||
--learning_rate 3e-5 \
|
||||
--num_train_epochs 2.0 \
|
||||
--max_seq_length 384 \
|
||||
--doc_stride 128 \
|
||||
--output_dir /tmp/debug_squad/
|
||||
```
|
||||
|
||||
Training with the previously defined hyper-parameters yields the following results:
|
||||
|
||||
```bash
|
||||
f1 = 88.52
|
||||
exact_match = 81.22
|
||||
```
|
||||
|
||||
#### Distributed training
|
||||
|
||||
|
||||
Here is an example using distributed training on 8 V100 GPUs and the BERT whole-word-masking uncased model to reach an F1 > 93 on SQuAD:
|
||||
|
||||
```bash
|
||||
python -m torch.distributed.launch --nproc_per_node=8 run_squad.py \
|
||||
--model_type bert \
|
||||
--model_name_or_path bert-base-cased \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--train_file $SQUAD_DIR/train-v1.1.json \
|
||||
--predict_file $SQUAD_DIR/dev-v1.1.json \
|
||||
--learning_rate 3e-5 \
|
||||
--num_train_epochs 2 \
|
||||
--max_seq_length 384 \
|
||||
--doc_stride 128 \
|
||||
--output_dir ../models/wwm_uncased_finetuned_squad/ \
|
||||
--per_gpu_train_batch_size 24 \
|
||||
--gradient_accumulation_steps 12
|
||||
```
|
||||
|
||||
Training with the previously defined hyper-parameters yields the following results:
|
||||
|
||||
```bash
|
||||
f1 = 93.15
|
||||
exact_match = 86.91
|
||||
```
|
||||
|
||||
This fine-tuned model is available as a checkpoint under the reference
|
||||
`bert-large-uncased-whole-word-masking-finetuned-squad`.
|
||||
|
||||
Feedback and more use cases and benchmarks involving TPUs are welcome; please share them with the community.
|
||||
|
||||
38
examples/adversarial/README.md
Normal file
@ -0,0 +1,38 @@
|
||||
## Adversarial evaluation of model performances
|
||||
|
||||
Here is an example on evaluating a model using adversarial evaluation of natural language inference with the Heuristic Analysis for NLI Systems (HANS) dataset [McCoy et al., 2019](https://arxiv.org/abs/1902.01007). The example was gracefully provided by [Nafise Sadat Moosavi](https://github.com/ns-moosavi).
|
||||
|
||||
The HANS dataset can be downloaded from [this location](https://github.com/tommccoy1/hans).
|
||||
|
||||
This is an example of using run_hans.py:
|
||||
|
||||
```bash
|
||||
export HANS_DIR=path-to-hans
|
||||
export MODEL_TYPE=type-of-the-model-e.g.-bert-roberta-xlnet-etc
|
||||
export MODEL_PATH=path-to-the-model-directory-that-is-trained-on-NLI-e.g.-by-using-run_glue.py
|
||||
|
||||
python run_hans.py \
|
||||
--task_name hans \
|
||||
--model_type $MODEL_TYPE \
|
||||
--do_eval \
|
||||
--data_dir $HANS_DIR \
|
||||
--model_name_or_path $MODEL_PATH \
|
||||
--max_seq_length 128 \
|
||||
--output_dir $MODEL_PATH
|
||||
```
|
||||
|
||||
This will create the hans_predictions.txt file in MODEL_PATH, which can then be evaluated using hans/evaluate_heur_output.py from the HANS dataset.
|
||||
|
||||
The results of the BERT-base model that is trained on MNLI using batch size 8 and random seed 42 on the HANS dataset are as follows:
|
||||
|
||||
```bash
|
||||
Heuristic entailed results:
|
||||
lexical_overlap: 0.9702
|
||||
subsequence: 0.9942
|
||||
constituent: 0.9962
|
||||
|
||||
Heuristic non-entailed results:
|
||||
lexical_overlap: 0.199
|
||||
subsequence: 0.0396
|
||||
constituent: 0.118
|
||||
```
|
||||
231
examples/adversarial/run_hans.py
Normal file
@ -0,0 +1,231 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
||||
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" Finetuning the library models for sequence classification on HANS."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from transformers import (
|
||||
AutoConfig,
|
||||
AutoModelForSequenceClassification,
|
||||
AutoTokenizer,
|
||||
HfArgumentParser,
|
||||
Trainer,
|
||||
TrainingArguments,
|
||||
default_data_collator,
|
||||
set_seed,
|
||||
)
|
||||
from utils_hans import HansDataset, InputFeatures, hans_processors, hans_tasks_num_labels
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ModelArguments:
|
||||
"""
|
||||
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
|
||||
"""
|
||||
|
||||
model_name_or_path: str = field(
|
||||
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
|
||||
)
|
||||
config_name: Optional[str] = field(
|
||||
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
|
||||
)
|
||||
tokenizer_name: Optional[str] = field(
|
||||
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
|
||||
)
|
||||
cache_dir: Optional[str] = field(
|
||||
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DataTrainingArguments:
|
||||
"""
|
||||
Arguments pertaining to what data we are going to input our model for training and eval.
|
||||
"""
|
||||
|
||||
task_name: str = field(
|
||||
metadata={"help": "The name of the task to train selected in the list: " + ", ".join(hans_processors.keys())}
|
||||
)
|
||||
data_dir: str = field(
|
||||
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
|
||||
)
|
||||
max_seq_length: int = field(
|
||||
default=128,
|
||||
metadata={
|
||||
"help": "The maximum total input sequence length after tokenization. Sequences longer "
|
||||
"than this will be truncated, sequences shorter will be padded."
|
||||
},
|
||||
)
|
||||
overwrite_cache: bool = field(
|
||||
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
|
||||
)
|
||||
|
||||
|
||||
def hans_data_collator(features: List[InputFeatures]) -> Dict[str, torch.Tensor]:
|
||||
"""
|
||||
Data collator that removes the "pairID" key if present.
|
||||
"""
|
||||
batch = default_data_collator(features)
|
||||
_ = batch.pop("pairID", None)
|
||||
return batch
|
||||
|
||||
|
||||
def main():
|
||||
# See all possible arguments in src/transformers/training_args.py
|
||||
# or by passing the --help flag to this script.
|
||||
# We now keep distinct sets of args, for a cleaner separation of concerns.
|
||||
|
||||
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
|
||||
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
||||
|
||||
if (
|
||||
os.path.exists(training_args.output_dir)
|
||||
and os.listdir(training_args.output_dir)
|
||||
and training_args.do_train
|
||||
and not training_args.overwrite_output_dir
|
||||
):
|
||||
raise ValueError(
|
||||
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
|
||||
)
|
||||
|
||||
# Setup logging
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||
datefmt="%m/%d/%Y %H:%M:%S",
|
||||
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
|
||||
)
|
||||
logger.warning(
|
||||
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
|
||||
training_args.local_rank,
|
||||
training_args.device,
|
||||
training_args.n_gpu,
|
||||
bool(training_args.local_rank != -1),
|
||||
training_args.fp16,
|
||||
)
|
||||
logger.info("Training/evaluation parameters %s", training_args)
|
||||
|
||||
# Set seed
|
||||
set_seed(training_args.seed)
|
||||
|
||||
try:
|
||||
num_labels = hans_tasks_num_labels[data_args.task_name]
|
||||
except KeyError:
|
||||
raise ValueError("Task not found: %s" % (data_args.task_name))
|
||||
|
||||
# Load pretrained model and tokenizer
|
||||
#
|
||||
# Distributed training:
|
||||
# The .from_pretrained methods guarantee that only one local process can concurrently
|
||||
# download model & vocab.
|
||||
|
||||
config = AutoConfig.from_pretrained(
|
||||
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
|
||||
num_labels=num_labels,
|
||||
finetuning_task=data_args.task_name,
|
||||
cache_dir=model_args.cache_dir,
|
||||
)
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
|
||||
cache_dir=model_args.cache_dir,
|
||||
)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(
|
||||
model_args.model_name_or_path,
|
||||
from_tf=bool(".ckpt" in model_args.model_name_or_path),
|
||||
config=config,
|
||||
cache_dir=model_args.cache_dir,
|
||||
)
|
||||
|
||||
# Get datasets
|
||||
train_dataset = (
|
||||
HansDataset(
|
||||
data_dir=data_args.data_dir,
|
||||
tokenizer=tokenizer,
|
||||
task=data_args.task_name,
|
||||
max_seq_length=data_args.max_seq_length,
|
||||
overwrite_cache=data_args.overwrite_cache,
|
||||
)
|
||||
if training_args.do_train
|
||||
else None
|
||||
)
|
||||
eval_dataset = (
|
||||
HansDataset(
|
||||
data_dir=data_args.data_dir,
|
||||
tokenizer=tokenizer,
|
||||
task=data_args.task_name,
|
||||
max_seq_length=data_args.max_seq_length,
|
||||
overwrite_cache=data_args.overwrite_cache,
|
||||
evaluate=True,
|
||||
)
|
||||
if training_args.do_eval
|
||||
else None
|
||||
)
|
||||
|
||||
# Initialize our Trainer
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=training_args,
|
||||
train_dataset=train_dataset,
|
||||
eval_dataset=eval_dataset,
|
||||
data_collator=hans_data_collator,
|
||||
)
|
||||
|
||||
# Training
|
||||
if training_args.do_train:
|
||||
trainer.train(
|
||||
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
|
||||
)
|
||||
trainer.save_model()
|
||||
# For convenience, we also re-save the tokenizer to the same directory,
|
||||
# so that you can share your model easily on huggingface.co/models =)
|
||||
if trainer.is_world_master():
|
||||
tokenizer.save_pretrained(training_args.output_dir)
|
||||
|
||||
# Evaluation
|
||||
if training_args.do_eval:
|
||||
logger.info("*** Evaluate ***")
|
||||
|
||||
output = trainer.predict(eval_dataset)
|
||||
preds = output.predictions
|
||||
preds = np.argmax(preds, axis=1)
|
||||
|
||||
pair_ids = [ex.pairID for ex in eval_dataset]
|
||||
output_eval_file = os.path.join(training_args.output_dir, "hans_predictions.txt")
|
||||
label_list = eval_dataset.get_labels()
|
||||
if trainer.is_world_master():
|
||||
with open(output_eval_file, "w") as writer:
|
||||
writer.write("pairID,gold_label\n")
|
||||
for pid, pred in zip(pair_ids, preds):
|
||||
writer.write("ex" + str(pid) + "," + label_list[int(pred)] + "\n")
|
||||
|
||||
trainer._log(output.metrics)
|
||||
|
||||
|
||||
def _mp_fn(index):
|
||||
# For xla_spawn (TPUs)
|
||||
main()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
331
examples/adversarial/utils_hans.py
Normal file
@ -0,0 +1,331 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
||||
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import tqdm
|
||||
from filelock import FileLock
|
||||
|
||||
from transformers import (
|
||||
BartTokenizer,
|
||||
BartTokenizerFast,
|
||||
DataProcessor,
|
||||
PreTrainedTokenizer,
|
||||
RobertaTokenizer,
|
||||
RobertaTokenizerFast,
|
||||
XLMRobertaTokenizer,
|
||||
is_tf_available,
|
||||
is_torch_available,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class InputExample:
|
||||
"""
|
||||
A single training/test example for simple sequence classification.
|
||||
|
||||
Args:
|
||||
guid: Unique id for the example.
|
||||
text_a: string. The untokenized text of the first sequence. For single
|
||||
sequence tasks, only this sequence must be specified.
|
||||
text_b: (Optional) string. The untokenized text of the second sequence.
|
||||
Only must be specified for sequence pair tasks.
|
||||
label: (Optional) string. The label of the example. This should be
|
||||
specified for train and dev examples, but not for test examples.
|
||||
pairID: (Optional) string. Unique identifier for the pair of sentences.
|
||||
"""
|
||||
|
||||
guid: str
|
||||
text_a: str
|
||||
text_b: Optional[str] = None
|
||||
label: Optional[str] = None
|
||||
pairID: Optional[str] = None
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class InputFeatures:
|
||||
"""
|
||||
A single set of features of data.
|
||||
Property names are the same names as the corresponding inputs to a model.
|
||||
|
||||
Args:
|
||||
input_ids: Indices of input sequence tokens in the vocabulary.
|
||||
attention_mask: Mask to avoid performing attention on padding token indices.
|
||||
Mask values selected in ``[0, 1]``:
|
||||
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
|
||||
token_type_ids: (Optional) Segment token indices to indicate first and second
|
||||
portions of the inputs. Only some models use them.
|
||||
label: (Optional) Label corresponding to the input. Int for classification problems,
|
||||
float for regression problems.
|
||||
pairID: (Optional) Unique identifier for the pair of sentences.
|
||||
"""
|
||||
|
||||
input_ids: List[int]
|
||||
attention_mask: Optional[List[int]] = None
|
||||
token_type_ids: Optional[List[int]] = None
|
||||
label: Optional[Union[int, float]] = None
|
||||
pairID: Optional[int] = None
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
from torch.utils.data.dataset import Dataset
|
||||
|
||||
class HansDataset(Dataset):
|
||||
"""
|
||||
This will be superseded by a framework-agnostic approach
|
||||
soon.
|
||||
"""
|
||||
|
||||
features: List[InputFeatures]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
data_dir: str,
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
task: str,
|
||||
max_seq_length: Optional[int] = None,
|
||||
overwrite_cache=False,
|
||||
evaluate: bool = False,
|
||||
):
|
||||
processor = hans_processors[task]()
|
||||
|
||||
cached_features_file = os.path.join(
|
||||
data_dir,
|
||||
"cached_{}_{}_{}_{}".format(
|
||||
"dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
|
||||
),
|
||||
)
|
||||
label_list = processor.get_labels()
|
||||
if tokenizer.__class__ in (
|
||||
RobertaTokenizer,
|
||||
RobertaTokenizerFast,
|
||||
XLMRobertaTokenizer,
|
||||
BartTokenizer,
|
||||
BartTokenizerFast,
|
||||
):
|
||||
# HACK(label indices are swapped in RoBERTa pretrained model)
|
||||
label_list[1], label_list[2] = label_list[2], label_list[1]
|
||||
self.label_list = label_list
|
||||
|
||||
# Make sure only the first process in distributed training processes the dataset,
|
||||
# and the others will use the cache.
|
||||
lock_path = cached_features_file + ".lock"
|
||||
with FileLock(lock_path):
|
||||
|
||||
if os.path.exists(cached_features_file) and not overwrite_cache:
|
||||
logger.info(f"Loading features from cached file {cached_features_file}")
|
||||
self.features = torch.load(cached_features_file)
|
||||
else:
|
||||
logger.info(f"Creating features from dataset file at {data_dir}")
|
||||
|
||||
examples = (
|
||||
processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
|
||||
)
|
||||
|
||||
logger.info("Training examples: %s", len(examples))
|
||||
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
|
||||
logger.info("Saving features into cached file %s", cached_features_file)
|
||||
torch.save(self.features, cached_features_file)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.features)
|
||||
|
||||
def __getitem__(self, i) -> InputFeatures:
|
||||
return self.features[i]
|
||||
|
||||
def get_labels(self):
|
||||
return self.label_list
|
||||
|
||||
|
||||
if is_tf_available():
|
||||
import tensorflow as tf
|
||||
|
||||
class TFHansDataset:
|
||||
"""
|
||||
This will be superseded by a framework-agnostic approach
|
||||
soon.
|
||||
"""
|
||||
|
||||
features: List[InputFeatures]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
data_dir: str,
|
||||
tokenizer: PreTrainedTokenizer,
|
||||
task: str,
|
||||
max_seq_length: Optional[int] = 128,
|
||||
overwrite_cache=False,
|
||||
evaluate: bool = False,
|
||||
):
|
||||
processor = hans_processors[task]()
|
||||
label_list = processor.get_labels()
|
||||
if tokenizer.__class__ in (
|
||||
RobertaTokenizer,
|
||||
RobertaTokenizerFast,
|
||||
XLMRobertaTokenizer,
|
||||
BartTokenizer,
|
||||
BartTokenizerFast,
|
||||
):
|
||||
# HACK(label indices are swapped in RoBERTa pretrained model)
|
||||
label_list[1], label_list[2] = label_list[2], label_list[1]
|
||||
self.label_list = label_list
|
||||
|
||||
examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
|
||||
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
|
||||
|
||||
def gen():
|
||||
for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
|
||||
if ex_index % 10000 == 0:
|
||||
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
|
||||
|
||||
yield (
|
||||
{
|
||||
"example_id": 0,
|
||||
"input_ids": ex.input_ids,
|
||||
"attention_mask": ex.attention_mask,
|
||||
"token_type_ids": ex.token_type_ids,
|
||||
},
|
||||
ex.label,
|
||||
)
|
||||
|
||||
self.dataset = tf.data.Dataset.from_generator(
|
||||
gen,
|
||||
(
|
||||
{
|
||||
"example_id": tf.int32,
|
||||
"input_ids": tf.int32,
|
||||
"attention_mask": tf.int32,
|
||||
"token_type_ids": tf.int32,
|
||||
},
|
||||
tf.int64,
|
||||
),
|
||||
(
|
||||
{
|
||||
"example_id": tf.TensorShape([]),
|
||||
"input_ids": tf.TensorShape([None, None]),
|
||||
"attention_mask": tf.TensorShape([None, None]),
|
||||
"token_type_ids": tf.TensorShape([None, None]),
|
||||
},
|
||||
tf.TensorShape([]),
|
||||
),
|
||||
)
|
||||
|
||||
def get_dataset(self):
|
||||
return self.dataset
|
||||
|
||||
def __len__(self):
|
||||
return len(self.features)
|
||||
|
||||
def __getitem__(self, i) -> InputFeatures:
|
||||
return self.features[i]
|
||||
|
||||
def get_labels(self):
|
||||
return self.label_list
|
||||
|
||||
|
||||
class HansProcessor(DataProcessor):
|
||||
"""Processor for the HANS data set."""
|
||||
|
||||
def get_train_examples(self, data_dir):
|
||||
"""See base class."""
|
||||
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")
|
||||
|
||||
def get_dev_examples(self, data_dir):
|
||||
"""See base class."""
|
||||
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
|
||||
|
||||
def get_labels(self):
|
||||
"""See base class."""
|
||||
return ["contradiction", "entailment", "neutral"]
|
||||
|
||||
def _create_examples(self, lines, set_type):
|
||||
"""Creates examples for the training and dev sets."""
|
||||
examples = []
|
||||
for (i, line) in enumerate(lines):
|
||||
if i == 0:
|
||||
continue
|
||||
guid = "%s-%s" % (set_type, line[0])
|
||||
text_a = line[5]
|
||||
text_b = line[6]
|
||||
pairID = line[7][2:] if line[7].startswith("ex") else line[7]
|
||||
label = line[-1]
|
||||
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
|
||||
return examples
|
||||
|
||||
|
||||
def hans_convert_examples_to_features(
|
||||
examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer,
|
||||
):
|
||||
"""
|
||||
Loads a data file into a list of ``InputFeatures``
|
||||
|
||||
Args:
|
||||
examples: List of ``InputExamples`` containing the examples.
|
||||
tokenizer: Instance of a tokenizer that will tokenize the examples.
|
||||
max_length: Maximum example length.
|
||||
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method.
|
||||
output_mode: String indicating the output mode. Either ``regression`` or ``classification``.
|
||||
|
||||
Returns:
|
||||
A list of task-specific ``InputFeatures`` which can be fed to the model.
|
||||
|
||||
"""
|
||||
|
||||
label_map = {label: i for i, label in enumerate(label_list)}
|
||||
|
||||
features = []
|
||||
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
|
||||
if ex_index % 10000 == 0:
|
||||
logger.info("Writing example %d" % (ex_index))
|
||||
|
||||
inputs = tokenizer(
|
||||
example.text_a,
|
||||
example.text_b,
|
||||
add_special_tokens=True,
|
||||
max_length=max_length,
|
||||
padding="max_length",
|
||||
truncation=True,
|
||||
return_overflowing_tokens=True,
|
||||
)
|
||||
|
||||
label = label_map[example.label] if example.label in label_map else 0
|
||||
|
||||
pairID = int(example.pairID)
|
||||
|
||||
features.append(InputFeatures(**inputs, label=label, pairID=pairID))
|
||||
|
||||
for i, example in enumerate(examples[:5]):
|
||||
logger.info("*** Example ***")
|
||||
logger.info(f"guid: {example}")
|
||||
logger.info(f"features: {features[i]}")
|
||||
|
||||
return features
|
||||
|
||||
|
||||
hans_tasks_num_labels = {
|
||||
"hans": 3,
|
||||
}
|
||||
|
||||
hans_processors = {
|
||||
"hans": HansProcessor,
|
||||
}
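For reference, a minimal sketch of how the HANS processor and feature converter above fit together; the module name `utils_hans`, the local `./hans` data directory and the `bert-base-uncased` checkpoint are illustrative assumptions, not part of this diff.

```python
from transformers import AutoTokenizer

# Assumed import path for the HANS utilities shown above.
from utils_hans import HansProcessor, hans_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = HansProcessor()

# Reads heuristics_evaluation_set.txt from the (assumed) ./hans directory.
examples = processor.get_dev_examples("./hans")
features = hans_convert_examples_to_features(
    examples, label_list=processor.get_labels(), max_length=128, tokenizer=tokenizer,
)
print(len(features), features[0].label)
```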
|
||||
examples/benchmarking/plot_csv_file.py (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
import csv
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from matplotlib.ticker import ScalarFormatter
|
||||
|
||||
from transformers import HfArgumentParser
|
||||
|
||||
|
||||
def list_field(default=None, metadata=None):
|
||||
return field(default_factory=lambda: default, metadata=metadata)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PlotArguments:
|
||||
"""
|
||||
Arguments controlling which benchmark csv file to plot and how the plot is rendered.
|
||||
"""
|
||||
|
||||
csv_file: str = field(metadata={"help": "The csv file to plot."},)
|
||||
plot_along_batch: bool = field(
|
||||
default=False,
|
||||
metadata={"help": "Whether to plot along batch size or sequence lengh. Defaults to sequence length."},
|
||||
)
|
||||
is_time: bool = field(
|
||||
default=False,
|
||||
metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
|
||||
)
|
||||
no_log_scale: bool = field(
|
||||
default=False, metadata={"help": "Disable logarithmic scale when plotting"},
|
||||
)
|
||||
is_train: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
|
||||
},
|
||||
)
|
||||
figure_png_file: Optional[str] = field(
|
||||
default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
|
||||
)
|
||||
short_model_names: Optional[List[str]] = list_field(
|
||||
default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
|
||||
)
|
||||
|
||||
|
||||
def can_convert_to_int(string):
|
||||
try:
|
||||
int(string)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def can_convert_to_float(string):
|
||||
try:
|
||||
float(string)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
class Plot:
|
||||
def __init__(self, args):
|
||||
self.args = args
|
||||
self.result_dict = defaultdict(lambda: dict(bsz=[], seq_len=[], result={}))
|
||||
|
||||
with open(self.args.csv_file, newline="") as csv_file:
|
||||
reader = csv.DictReader(csv_file)
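# Each row of the benchmark csv is expected to provide the columns
# "model", "batch_size", "sequence_length" and "result" (a memory or time measurement),
# as accessed below.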
|
||||
for row in reader:
|
||||
model_name = row["model"]
|
||||
self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
|
||||
self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
|
||||
if can_convert_to_int(row["result"]):
|
||||
# value is not None
|
||||
self.result_dict[model_name]["result"][
|
||||
(int(row["batch_size"]), int(row["sequence_length"]))
|
||||
] = int(row["result"])
|
||||
elif can_convert_to_float(row["result"]):
|
||||
# value is not None
|
||||
self.result_dict[model_name]["result"][
|
||||
(int(row["batch_size"]), int(row["sequence_length"]))
|
||||
] = float(row["result"])
|
||||
|
||||
def plot(self):
|
||||
fig, ax = plt.subplots()
|
||||
title_str = "Time usage" if self.args.is_time else "Memory usage"
|
||||
title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
|
||||
|
||||
if not self.args.no_log_scale:
|
||||
# set logarithm scales
|
||||
ax.set_xscale("log")
|
||||
ax.set_yscale("log")
|
||||
|
||||
for axis in [ax.xaxis, ax.yaxis]:
|
||||
axis.set_major_formatter(ScalarFormatter())
|
||||
|
||||
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
|
||||
batch_sizes = sorted(list(set(self.result_dict[model_name]["bsz"])))
|
||||
sequence_lengths = sorted(list(set(self.result_dict[model_name]["seq_len"])))
|
||||
results = self.result_dict[model_name]["result"]
|
||||
|
||||
(x_axis_array, inner_loop_array) = (
|
||||
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
|
||||
)
|
||||
|
||||
label_model_name = (
|
||||
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
|
||||
)
|
||||
|
||||
for inner_loop_value in inner_loop_array:
|
||||
if self.args.plot_along_batch:
|
||||
y_axis_array = np.asarray(
|
||||
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
|
||||
dtype=int,
|
||||
)
|
||||
else:
|
||||
y_axis_array = np.asarray(
|
||||
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
|
||||
dtype=np.float32,
|
||||
)
|
||||
|
||||
(x_axis_label, inner_loop_label) = (
|
||||
("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
|
||||
)
|
||||
|
||||
x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
|
||||
plt.scatter(
|
||||
x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
|
||||
)
|
||||
plt.plot(x_axis_array, y_axis_array, "--")
|
||||
|
||||
title_str += f" {label_model_name} vs."
|
||||
|
||||
title_str = title_str[:-4]
|
||||
y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
|
||||
|
||||
# plot
|
||||
plt.title(title_str)
|
||||
plt.xlabel(x_axis_label)
|
||||
plt.ylabel(y_axis_label)
|
||||
plt.legend()
|
||||
|
||||
if self.args.figure_png_file is not None:
|
||||
plt.savefig(self.args.figure_png_file)
|
||||
else:
|
||||
plt.show()
|
||||
|
||||
|
||||
def main():
|
||||
parser = HfArgumentParser(PlotArguments)
|
||||
plot_args = parser.parse_args_into_dataclasses()[0]
|
||||
plot = Plot(args=plot_args)
|
||||
plot.plot()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
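As a usage sketch, the plotting utility can also be driven programmatically instead of through `HfArgumentParser`; the module path and the csv filename below are assumptions for illustration.

```python
# Assumes this file is importable as plot_csv_file and that a benchmark csv exists locally.
from plot_csv_file import Plot, PlotArguments

plot_args = PlotArguments(
    csv_file="./inference_memory.csv",  # assumed output of a benchmark run with csv saving enabled
    figure_png_file="inference_memory.png",
)
Plot(args=plot_args).plot()
```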
|
||||
examples/benchmarking/run_benchmark.py (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2020 The HuggingFace Inc. team.
|
||||
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" Benchmarking the library on inference and training """
|
||||
|
||||
from transformers import HfArgumentParser, PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
|
||||
def main():
|
||||
parser = HfArgumentParser(PyTorchBenchmarkArguments)
|
||||
benchmark_args = parser.parse_args_into_dataclasses()[0]
|
||||
benchmark = PyTorchBenchmark(args=benchmark_args)
|
||||
benchmark.run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
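The same benchmark can be configured programmatically; the argument names below (`models`, `batch_sizes`, `sequence_lengths`) come from the benchmark argument dataclasses and are assumptions as far as this diff is concerned.

```python
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

# A small sketch: benchmark one model at a few batch sizes and sequence lengths.
benchmark_args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[1, 8],
    sequence_lengths=[32, 128],
)
results = PyTorchBenchmark(args=benchmark_args).run()
```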
|
||||
examples/benchmarking/run_benchmark_tf.py (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2018 The HuggingFace Inc. team.
|
||||
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" Benchmarking the library on inference and training in TensorFlow"""
|
||||
|
||||
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
|
||||
def main():
|
||||
parser = HfArgumentParser(TensorFlowBenchmarkArguments)
|
||||
benchmark_args = parser.parse_args_into_dataclasses()[0]
|
||||
benchmark = TensorFlowBenchmark(args=benchmark_args)
|
||||
benchmark.run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
examples/bert-loses-patience/README.md (new executable file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
# Patience-based Early Exit
|
||||
|
||||
Patience-based Early Exit (PABEE) is a plug-and-play inference method for pretrained language models.
|
||||
We have implemented it on BERT and ALBERT. PABEE can make your LM faster and more robust, and it can even improve the performance of ALBERT on GLUE. The only sacrifice is that the inference batch size must be 1.
|
||||
Learn more in the paper ["BERT Loses Patience: Fast and Robust Inference with Early Exit"](https://arxiv.org/abs/2006.04152) and the official [GitHub repo](https://github.com/JetRunner/PABEE).
|
||||
|
||||

|
||||
|
||||
## Training
|
||||
|
||||
You can fine-tune a pretrained language model (BERT or ALBERT) and train the internal classifiers as follows:
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue_data
|
||||
export TASK_NAME=MRPC
|
||||
|
||||
python ./run_glue_with_pabee.py \
|
||||
--model_type albert \
|
||||
--model_name_or_path bert-base-uncased/albert-base-v2 \
|
||||
--task_name $TASK_NAME \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir "$GLUE_DIR/$TASK_NAME" \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_train_batch_size 32 \
|
||||
--per_gpu_eval_batch_size 32 \
|
||||
--learning_rate 2e-5 \
|
||||
--save_steps 50 \
|
||||
--logging_steps 50 \
|
||||
--num_train_epochs 5 \
|
||||
--output_dir /path/to/save/ \
|
||||
--evaluate_during_training
|
||||
```
|
||||
|
||||
## Inference
|
||||
|
||||
You can run inference with different patience settings as follows:
|
||||
```bash
|
||||
export GLUE_DIR=/path/to/glue_data
|
||||
export TASK_NAME=MRPC
|
||||
|
||||
python ./run_glue_with_pabee.py \
|
||||
--model_type albert \
|
||||
--model_name_or_path /path/to/save/ \
|
||||
--task_name $TASK_NAME \
|
||||
--do_eval \
|
||||
--do_lower_case \
|
||||
--data_dir "$GLUE_DIR/$TASK_NAME" \
|
||||
--max_seq_length 128 \
|
||||
--per_gpu_eval_batch_size 1 \
|
||||
--learning_rate 2e-5 \
|
||||
--logging_steps 50 \
|
||||
--num_train_epochs 15 \
|
||||
--output_dir /path/to/save/ \
|
||||
--eval_all_checkpoints \
|
||||
--patience 3,4,5,6,7,8
|
||||
```
|
||||
where `--patience` can be a comma-separated list of patience settings; the script evaluates each setting so you can determine which patience value works best.
|
||||
|
||||
When evaluating on a regression task (STS-B), you may add `--regression_threshold 0.1` to define the regression threshold.
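Under the hood, the evaluation script configures the patience directly on the PABEE backbone before running inference; a minimal sketch (the checkpoint path is a placeholder):

```python
from pabee.modeling_pabee_albert import AlbertForSequenceClassificationWithPabee

model = AlbertForSequenceClassificationWithPabee.from_pretrained("/path/to/save/")
model.albert.set_regression_threshold(0.1)  # only used for regression tasks such as STS-B
model.albert.set_patience(6)                # exit once 6 consecutive internal classifiers agree
model.albert.reset_stats()
# ... run evaluation with a batch size of 1 ...
model.albert.log_stats()                    # prints the average number of layers used
```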
|
||||
|
||||
## Results
|
||||
On the GLUE dev set:
|
||||
|
||||
| Model | \#Param | Speed | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST\-2 | STS\-B |
|
||||
|--------------|---------|--------|-------|-------|-------|-------|-------|-------|--------|--------|
|
||||
| ALBERT\-base | 12M | | 58\.9 | 84\.6 | 89\.5 | 91\.7 | 89\.6 | 78\.6 | 92\.8 | 89\.5 |
|
||||
| \+PABEE | 12M | 1\.57x | 61\.2 | 85\.1 | 90\.0 | 91\.8 | 89\.6 | 80\.1 | 93\.0 | 90\.1 |
|
||||
|
||||
| Model | \#Param | Speed\-up | MNLI | SST\-2 | STS\-B |
|
||||
|---------------|---------|-----------|-------|--------|--------|
|
||||
| BERT\-base | 108M | | 84\.5 | 92\.1 | 88\.9 |
|
||||
| \+PABEE | 108M | 1\.62x | 83\.6 | 92\.0 | 88\.7 |
|
||||
| ALBERT\-large | 18M | | 86\.4 | 94\.9 | 90\.4 |
|
||||
| \+PABEE | 18M | 2\.42x | 86\.8 | 95\.2 | 90\.6 |
|
||||
|
||||
|
||||
## Citation
|
||||
If you find this resource useful, please consider citing the following paper:
|
||||
```bibtex
|
||||
@misc{zhou2020bert,
|
||||
title={BERT Loses Patience: Fast and Robust Inference with Early Exit},
|
||||
author={Wangchunshu Zhou and Canwen Xu and Tao Ge and Julian McAuley and Ke Xu and Furu Wei},
|
||||
year={2020},
|
||||
eprint={2006.04152},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={cs.CL}
|
||||
}
|
||||
```
|
||||
examples/bert-loses-patience/pabee/modeling_pabee_albert.py (new file, 310 lines)
@@ -0,0 +1,310 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2020 Google AI, Google Brain, the HuggingFace Inc. team and Microsoft Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""PyTorch ALBERT model with Patience-based Early Exit. """
|
||||
|
||||
import logging
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torch.nn import CrossEntropyLoss, MSELoss
|
||||
|
||||
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
|
||||
from transformers.modeling_albert import (
|
||||
ALBERT_INPUTS_DOCSTRING,
|
||||
ALBERT_START_DOCSTRING,
|
||||
AlbertModel,
|
||||
AlbertPreTrainedModel,
|
||||
AlbertTransformer,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AlbertTransformerWithPabee(AlbertTransformer):
|
||||
def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
|
||||
if current_layer == 0:
|
||||
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
|
||||
else:
|
||||
hidden_states = hidden_states[0]
|
||||
|
||||
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
|
||||
|
||||
# Index of the hidden group
|
||||
group_idx = int(current_layer / (self.config.num_hidden_layers / self.config.num_hidden_groups))
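# For example (values assumed for illustration): albert-base-v2 has num_hidden_layers=12
# and num_hidden_groups=1, so layers_per_group = 12 and group_idx = 0 for every layer,
# i.e. all 12 calls reuse the single shared albert_layer_groups[0].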
|
||||
|
||||
layer_group_output = self.albert_layer_groups[group_idx](
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
|
||||
)
|
||||
hidden_states = layer_group_output[0]
|
||||
|
||||
return (hidden_states,)
|
||||
|
||||
|
||||
@add_start_docstrings(
|
||||
"The bare ALBERT Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
|
||||
ALBERT_START_DOCSTRING,
|
||||
)
|
||||
class AlbertModelWithPabee(AlbertModel):
|
||||
def __init__(self, config):
|
||||
super().__init__(config)
|
||||
|
||||
self.encoder = AlbertTransformerWithPabee(config)
|
||||
|
||||
self.init_weights()
|
||||
self.patience = 0
|
||||
self.inference_instances_num = 0
|
||||
self.inference_layers_num = 0
|
||||
|
||||
self.regression_threshold = 0
|
||||
|
||||
def set_regression_threshold(self, threshold):
|
||||
self.regression_threshold = threshold
|
||||
|
||||
def set_patience(self, patience):
|
||||
self.patience = patience
|
||||
|
||||
def reset_stats(self):
|
||||
self.inference_instances_num = 0
|
||||
self.inference_layers_num = 0
|
||||
|
||||
def log_stats(self):
|
||||
avg_inf_layers = self.inference_layers_num / self.inference_instances_num
|
||||
message = f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
|
||||
print(message)
|
||||
|
||||
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
token_type_ids=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
output_dropout=None,
|
||||
output_layers=None,
|
||||
regression=False,
|
||||
):
|
||||
r"""
|
||||
Return:
|
||||
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
|
||||
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
|
||||
Sequence of hidden-states at the output of the last layer of the model.
|
||||
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
|
||||
Last layer hidden-state of the first token of the sequence (classification token)
|
||||
further processed by a Linear layer and a Tanh activation function. The Linear
|
||||
layer weights are trained from the next sentence prediction (classification)
|
||||
objective during pre-training.
|
||||
|
||||
This output is usually *not* a good summary
|
||||
of the semantic content of the input, you're often better with averaging or pooling
|
||||
the sequence of hidden-states for the whole input sequence.
|
||||
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
|
||||
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
|
||||
|
||||
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
||||
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
|
||||
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
|
||||
|
||||
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
||||
heads.
|
||||
"""
|
||||
|
||||
if input_ids is not None and inputs_embeds is not None:
|
||||
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
||||
elif input_ids is not None:
|
||||
input_shape = input_ids.size()
|
||||
elif inputs_embeds is not None:
|
||||
input_shape = inputs_embeds.size()[:-1]
|
||||
else:
|
||||
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
||||
|
||||
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
||||
|
||||
if attention_mask is None:
|
||||
attention_mask = torch.ones(input_shape, device=device)
|
||||
if token_type_ids is None:
|
||||
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
|
||||
|
||||
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
|
||||
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
||||
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
||||
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
||||
|
||||
embedding_output = self.embeddings(
|
||||
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
|
||||
)
|
||||
encoder_outputs = embedding_output
|
||||
|
||||
if self.training:
|
||||
res = []
|
||||
for i in range(self.config.num_hidden_layers):
|
||||
encoder_outputs = self.encoder.adaptive_forward(
|
||||
encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask,
|
||||
)
|
||||
|
||||
pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0]))
|
||||
logits = output_layers[i](output_dropout(pooled_output))
|
||||
res.append(logits)
|
||||
elif self.patience == 0: # Use all layers for inference
|
||||
encoder_outputs = self.encoder(encoder_outputs, extended_attention_mask, head_mask=head_mask)
|
||||
pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0]))
|
||||
res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
|
||||
else:
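# Patience-based early exit: run the layers one at a time and, after each layer,
# compare the current internal classifier's prediction with the previous one.
# For classification the predictions must be identical; for regression they must
# differ by less than self.regression_threshold. Once `patience` consecutive
# layers agree, stop and return that prediction instead of running all layers.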
|
||||
patient_counter = 0
|
||||
patient_result = None
|
||||
calculated_layer_num = 0
|
||||
for i in range(self.config.num_hidden_layers):
|
||||
calculated_layer_num += 1
|
||||
encoder_outputs = self.encoder.adaptive_forward(
|
||||
encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask,
|
||||
)
|
||||
|
||||
pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0]))
|
||||
logits = output_layers[i](pooled_output)
|
||||
if regression:
|
||||
labels = logits.detach()
|
||||
if patient_result is not None:
|
||||
patient_labels = patient_result.detach()
|
||||
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
|
||||
patient_counter += 1
|
||||
else:
|
||||
patient_counter = 0
|
||||
else:
|
||||
labels = logits.detach().argmax(dim=1)
|
||||
if patient_result is not None:
|
||||
patient_labels = patient_result.detach().argmax(dim=1)
|
||||
if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
|
||||
patient_counter += 1
|
||||
else:
|
||||
patient_counter = 0
|
||||
|
||||
patient_result = logits
|
||||
if patient_counter == self.patience:
|
||||
break
|
||||
res = [patient_result]
|
||||
self.inference_layers_num += calculated_layer_num
|
||||
self.inference_instances_num += 1
|
||||
|
||||
return res
|
||||
|
||||
|
||||
@add_start_docstrings(
|
||||
"""Albert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
|
||||
the pooled output) e.g. for GLUE tasks. """,
|
||||
ALBERT_START_DOCSTRING,
|
||||
)
|
||||
class AlbertForSequenceClassificationWithPabee(AlbertPreTrainedModel):
|
||||
def __init__(self, config):
|
||||
super().__init__(config)
|
||||
self.num_labels = config.num_labels
|
||||
|
||||
self.albert = AlbertModelWithPabee(config)
|
||||
self.dropout = nn.Dropout(config.classifier_dropout_prob)
|
||||
self.classifiers = nn.ModuleList(
|
||||
[nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
|
||||
)
|
||||
|
||||
self.init_weights()
|
||||
|
||||
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
token_type_ids=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
labels=None,
|
||||
):
|
||||
r"""
|
||||
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
|
||||
Labels for computing the sequence classification/regression loss.
|
||||
Indices should be in ``[0, ..., config.num_labels - 1]``.
|
||||
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
|
||||
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
|
||||
|
||||
Returns:
|
||||
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
|
||||
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
|
||||
Classification (or regression if config.num_labels==1) loss.
|
||||
logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
|
||||
Classification (or regression if config.num_labels==1) scores (before SoftMax).
|
||||
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
|
||||
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
|
||||
|
||||
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
||||
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
|
||||
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
|
||||
|
||||
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
||||
heads.
|
||||
|
||||
Examples::
|
||||
|
||||
from transformers import AlbertTokenizer
|
||||
from pabee import AlbertForSequenceClassificationWithPabee
|
||||
import torch
|
||||
|
||||
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
|
||||
model = AlbertForSequenceClassificationWithPabee.from_pretrained('albert-base-v2')
|
||||
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
|
||||
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
|
||||
outputs = model(input_ids, labels=labels)
|
||||
loss, logits = outputs[:2]
|
||||
|
||||
"""
|
||||
|
||||
logits = self.albert(
|
||||
input_ids=input_ids,
|
||||
attention_mask=attention_mask,
|
||||
token_type_ids=token_type_ids,
|
||||
position_ids=position_ids,
|
||||
head_mask=head_mask,
|
||||
inputs_embeds=inputs_embeds,
|
||||
output_dropout=self.dropout,
|
||||
output_layers=self.classifiers,
|
||||
regression=self.num_labels == 1,
|
||||
)
|
||||
|
||||
outputs = (logits[-1],)
|
||||
|
||||
if labels is not None:
|
||||
total_loss = None
|
||||
total_weights = 0
|
||||
for ix, logits_item in enumerate(logits):
|
||||
if self.num_labels == 1:
|
||||
# We are doing regression
|
||||
loss_fct = MSELoss()
|
||||
loss = loss_fct(logits_item.view(-1), labels.view(-1))
|
||||
else:
|
||||
loss_fct = CrossEntropyLoss()
|
||||
loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
|
||||
if total_loss is None:
|
||||
total_loss = loss
|
||||
else:
|
||||
total_loss += loss * (ix + 1)
|
||||
total_weights += ix + 1
|
||||
outputs = (total_loss / total_weights,) + outputs
|
||||
|
||||
return outputs
|
||||
examples/bert-loses-patience/pabee/modeling_pabee_bert.py (new file, 342 lines)
@@ -0,0 +1,342 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation.
|
||||
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""PyTorch BERT model with Patience-based Early Exit. """
|
||||
|
||||
|
||||
import logging
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from torch.nn import CrossEntropyLoss, MSELoss
|
||||
|
||||
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable
|
||||
from transformers.modeling_bert import (
|
||||
BERT_INPUTS_DOCSTRING,
|
||||
BERT_START_DOCSTRING,
|
||||
BertEncoder,
|
||||
BertModel,
|
||||
BertPreTrainedModel,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BertEncoderWithPabee(BertEncoder):
|
||||
def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
|
||||
layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
|
||||
|
||||
hidden_states = layer_outputs[0]
|
||||
|
||||
return hidden_states
|
||||
|
||||
|
||||
@add_start_docstrings(
|
||||
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
|
||||
BERT_START_DOCSTRING,
|
||||
)
|
||||
class BertModelWithPabee(BertModel):
|
||||
"""
|
||||
|
||||
The model can behave as an encoder (with only self-attention) as well
|
||||
as a decoder, in which case a layer of cross-attention is added between
|
||||
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
|
||||
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
|
||||
|
||||
To behave as a decoder the model needs to be initialized with the
|
||||
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
|
||||
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
|
||||
|
||||
.. _`Attention is all you need`:
|
||||
https://arxiv.org/abs/1706.03762
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
super().__init__(config)
|
||||
|
||||
self.encoder = BertEncoderWithPabee(config)
|
||||
|
||||
self.init_weights()
|
||||
self.patience = 0
|
||||
self.inference_instances_num = 0
|
||||
self.inference_layers_num = 0
|
||||
|
||||
self.regression_threshold = 0
|
||||
|
||||
def set_regression_threshold(self, threshold):
|
||||
self.regression_threshold = threshold
|
||||
|
||||
def set_patience(self, patience):
|
||||
self.patience = patience
|
||||
|
||||
def reset_stats(self):
|
||||
self.inference_instances_num = 0
|
||||
self.inference_layers_num = 0
|
||||
|
||||
def log_stats(self):
|
||||
avg_inf_layers = self.inference_layers_num / self.inference_instances_num
|
||||
message = f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
|
||||
print(message)
|
||||
|
||||
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
token_type_ids=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
output_dropout=None,
|
||||
output_layers=None,
|
||||
regression=False,
|
||||
):
|
||||
r"""
|
||||
Return:
|
||||
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
|
||||
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
|
||||
Sequence of hidden-states at the output of the last layer of the model.
|
||||
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
|
||||
Last layer hidden-state of the first token of the sequence (classification token)
|
||||
further processed by a Linear layer and a Tanh activation function. The Linear
|
||||
layer weights are trained from the next sentence prediction (classification)
|
||||
objective during pre-training.
|
||||
|
||||
This output is usually *not* a good summary
|
||||
of the semantic content of the input, you're often better with averaging or pooling
|
||||
the sequence of hidden-states for the whole input sequence.
|
||||
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
|
||||
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
|
||||
|
||||
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
||||
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
|
||||
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
|
||||
|
||||
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
||||
heads.
|
||||
"""
|
||||
|
||||
if input_ids is not None and inputs_embeds is not None:
|
||||
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
||||
elif input_ids is not None:
|
||||
input_shape = input_ids.size()
|
||||
elif inputs_embeds is not None:
|
||||
input_shape = inputs_embeds.size()[:-1]
|
||||
else:
|
||||
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
||||
|
||||
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
||||
|
||||
if attention_mask is None:
|
||||
attention_mask = torch.ones(input_shape, device=device)
|
||||
if token_type_ids is None:
|
||||
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
|
||||
|
||||
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
||||
# ourselves in which case we just need to make it broadcastable to all heads.
|
||||
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
|
||||
|
||||
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
||||
if self.config.is_decoder and encoder_hidden_states is not None:
|
||||
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
||||
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
||||
if encoder_attention_mask is None:
|
||||
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
||||
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
||||
else:
|
||||
encoder_extended_attention_mask = None
|
||||
|
||||
# Prepare head mask if needed
|
||||
# 1.0 in head_mask indicate we keep the head
|
||||
# attention_probs has shape bsz x n_heads x N x N
|
||||
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
||||
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
||||
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
||||
|
||||
embedding_output = self.embeddings(
|
||||
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
|
||||
)
|
||||
encoder_outputs = embedding_output
|
||||
|
||||
if self.training:
|
||||
res = []
|
||||
for i in range(self.config.num_hidden_layers):
|
||||
encoder_outputs = self.encoder.adaptive_forward(
|
||||
encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
|
||||
)
|
||||
|
||||
pooled_output = self.pooler(encoder_outputs)
|
||||
logits = output_layers[i](output_dropout(pooled_output))
|
||||
res.append(logits)
|
||||
elif self.patience == 0: # Use all layers for inference
|
||||
encoder_outputs = self.encoder(
|
||||
embedding_output,
|
||||
attention_mask=extended_attention_mask,
|
||||
head_mask=head_mask,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
encoder_attention_mask=encoder_extended_attention_mask,
|
||||
)
|
||||
pooled_output = self.pooler(encoder_outputs[0])
|
||||
res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
|
||||
else:
|
||||
patient_counter = 0
|
||||
patient_result = None
|
||||
calculated_layer_num = 0
|
||||
for i in range(self.config.num_hidden_layers):
|
||||
calculated_layer_num += 1
|
||||
encoder_outputs = self.encoder.adaptive_forward(
|
||||
encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
|
||||
)
|
||||
|
||||
pooled_output = self.pooler(encoder_outputs)
|
||||
logits = output_layers[i](pooled_output)
|
||||
if regression:
|
||||
labels = logits.detach()
|
||||
if patient_result is not None:
|
||||
patient_labels = patient_result.detach()
|
||||
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
|
||||
patient_counter += 1
|
||||
else:
|
||||
patient_counter = 0
|
||||
else:
|
||||
labels = logits.detach().argmax(dim=1)
|
||||
if patient_result is not None:
|
||||
patient_labels = patient_result.detach().argmax(dim=1)
|
||||
if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
|
||||
patient_counter += 1
|
||||
else:
|
||||
patient_counter = 0
|
||||
|
||||
patient_result = logits
|
||||
if patient_counter == self.patience:
|
||||
break
|
||||
res = [patient_result]
|
||||
self.inference_layers_num += calculated_layer_num
|
||||
self.inference_instances_num += 1
|
||||
|
||||
return res
|
||||
|
||||
|
||||
@add_start_docstrings(
|
||||
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
|
||||
the pooled output) e.g. for GLUE tasks. """,
|
||||
BERT_START_DOCSTRING,
|
||||
)
|
||||
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
|
||||
def __init__(self, config):
|
||||
super().__init__(config)
|
||||
self.num_labels = config.num_labels
|
||||
|
||||
self.bert = BertModelWithPabee(config)
|
||||
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
||||
self.classifiers = nn.ModuleList(
|
||||
[nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
|
||||
)
|
||||
|
||||
self.init_weights()
|
||||
|
||||
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
token_type_ids=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
labels=None,
|
||||
):
|
||||
r"""
|
||||
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
|
||||
Labels for computing the sequence classification/regression loss.
|
||||
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
|
||||
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
|
||||
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
||||
|
||||
Returns:
|
||||
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
|
||||
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
|
||||
Classification (or regression if config.num_labels==1) loss.
|
||||
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
|
||||
Classification (or regression if config.num_labels==1) scores (before SoftMax).
|
||||
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
|
||||
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
|
||||
|
||||
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
||||
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
|
||||
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
|
||||
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
|
||||
|
||||
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
||||
heads.
|
||||
|
||||
Examples::
|
||||
|
||||
from transformers import BertTokenizer, BertForSequenceClassification
|
||||
from pabee import BertForSequenceClassificationWithPabee
|
||||
import torch
|
||||
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
||||
model = BertForSequenceClassificationWithPabee.from_pretrained('bert-base-uncased')
|
||||
|
||||
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
|
||||
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
|
||||
outputs = model(input_ids, labels=labels)
|
||||
|
||||
loss, logits = outputs[:2]
|
||||
|
||||
"""
|
||||
|
||||
logits = self.bert(
|
||||
input_ids=input_ids,
|
||||
attention_mask=attention_mask,
|
||||
token_type_ids=token_type_ids,
|
||||
position_ids=position_ids,
|
||||
head_mask=head_mask,
|
||||
inputs_embeds=inputs_embeds,
|
||||
output_dropout=self.dropout,
|
||||
output_layers=self.classifiers,
|
||||
regression=self.num_labels == 1,
|
||||
)
|
||||
|
||||
outputs = (logits[-1],)
|
||||
|
||||
if labels is not None:
|
||||
total_loss = None
|
||||
total_weights = 0
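# The losses of the internal classifiers are combined as a weighted average where the
# classifier after layer ix gets weight (ix + 1), so deeper classifiers dominate the objective.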
|
||||
for ix, logits_item in enumerate(logits):
|
||||
if self.num_labels == 1:
|
||||
# We are doing regression
|
||||
loss_fct = MSELoss()
|
||||
loss = loss_fct(logits_item.view(-1), labels.view(-1))
|
||||
else:
|
||||
loss_fct = CrossEntropyLoss()
|
||||
loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
|
||||
if total_loss is None:
|
||||
total_loss = loss
|
||||
else:
|
||||
total_loss += loss * (ix + 1)
|
||||
total_weights += ix + 1
|
||||
outputs = (total_loss / total_weights,) + outputs
|
||||
|
||||
return outputs
|
||||
examples/bert-loses-patience/run_glue_with_pabee.py (new executable file, 697 lines)
@@ -0,0 +1,697 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation.
|
||||
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" Training and inference using the library models for sequence classification on GLUE (Bert, Albert) with PABEE."""
|
||||
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
|
||||
from torch.utils.data.distributed import DistributedSampler
|
||||
from tqdm import tqdm, trange
|
||||
|
||||
from pabee.modeling_pabee_albert import AlbertForSequenceClassificationWithPabee
|
||||
from pabee.modeling_pabee_bert import BertForSequenceClassificationWithPabee
|
||||
from transformers import (
|
||||
WEIGHTS_NAME,
|
||||
AdamW,
|
||||
AlbertConfig,
|
||||
AlbertTokenizer,
|
||||
BertConfig,
|
||||
BertTokenizer,
|
||||
get_linear_schedule_with_warmup,
|
||||
)
|
||||
from transformers import glue_compute_metrics as compute_metrics
|
||||
from transformers import glue_convert_examples_to_features as convert_examples_to_features
|
||||
from transformers import glue_output_modes as output_modes
|
||||
from transformers import glue_processors as processors
|
||||
|
||||
|
||||
try:
|
||||
from torch.utils.tensorboard import SummaryWriter
|
||||
except ImportError:
|
||||
from tensorboardX import SummaryWriter
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MODEL_CLASSES = {
|
||||
"bert": (BertConfig, BertForSequenceClassificationWithPabee, BertTokenizer),
|
||||
"albert": (AlbertConfig, AlbertForSequenceClassificationWithPabee, AlbertTokenizer),
|
||||
}
|
||||
|
||||
|
||||
def set_seed(args):
|
||||
random.seed(args.seed)
|
||||
np.random.seed(args.seed)
|
||||
torch.manual_seed(args.seed)
|
||||
if args.n_gpu > 0:
|
||||
torch.cuda.manual_seed_all(args.seed)
|
||||
|
||||
|
||||
def train(args, train_dataset, model, tokenizer):
|
||||
""" Train the model """
|
||||
if args.local_rank in [-1, 0]:
|
||||
tb_writer = SummaryWriter()
|
||||
|
||||
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
|
||||
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
|
||||
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
|
||||
|
||||
if args.max_steps > 0:
|
||||
t_total = args.max_steps
|
||||
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
|
||||
else:
|
||||
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
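# Worked example (values assumed): with 1,000 batches per epoch, gradient_accumulation_steps=2
# and num_train_epochs=5, t_total = (1000 // 2) * 5 = 2,500 optimizer steps. If max_steps=10000
# were set instead, num_train_epochs would be recomputed as 10000 // 500 + 1 = 21.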
|
||||
|
||||
# Prepare optimizer and schedule (linear warmup and decay)
|
||||
no_decay = ["bias", "LayerNorm.weight"]
|
||||
optimizer_grouped_parameters = [
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
|
||||
"weight_decay": args.weight_decay,
|
||||
},
|
||||
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
|
||||
]
|
||||
|
||||
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
|
||||
scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
|
||||
)
|
||||
|
||||
# Check if saved optimizer or scheduler states exist
|
||||
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
|
||||
os.path.join(args.model_name_or_path, "scheduler.pt")
|
||||
):
|
||||
# Load in optimizer and scheduler states
|
||||
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
|
||||
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
|
||||
|
||||
if args.fp16:
|
||||
try:
|
||||
from apex import amp
|
||||
except ImportError:
|
||||
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
|
||||
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
|
||||
|
||||
# multi-gpu training (should be after apex fp16 initialization)
|
||||
if args.n_gpu > 1:
|
||||
model = torch.nn.DataParallel(model)
|
||||
|
||||
# Distributed training (should be after apex fp16 initialization)
|
||||
if args.local_rank != -1:
|
||||
model = torch.nn.parallel.DistributedDataParallel(
|
||||
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
|
||||
)
|
||||
|
||||
# Train!
|
||||
logger.info("***** Running training *****")
|
||||
logger.info(" Num examples = %d", len(train_dataset))
|
||||
logger.info(" Num Epochs = %d", args.num_train_epochs)
|
||||
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
|
||||
logger.info(
|
||||
" Total train batch size (w. parallel, distributed & accumulation) = %d",
|
||||
args.train_batch_size
|
||||
* args.gradient_accumulation_steps
|
||||
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
|
||||
)
|
||||
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
|
||||
logger.info(" Total optimization steps = %d", t_total)
|
||||
|
||||
global_step = 0
|
||||
epochs_trained = 0
|
||||
steps_trained_in_current_epoch = 0
|
||||
# Check if continuing training from a checkpoint
|
||||
if os.path.exists(args.model_name_or_path):
|
||||
# set global_step to the global_step of the last saved checkpoint from the model path
|
||||
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
|
||||
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
|
||||
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
|
||||
|
||||
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
|
||||
logger.info(" Continuing training from epoch %d", epochs_trained)
|
||||
logger.info(" Continuing training from global step %d", global_step)
|
||||
logger.info(
|
||||
" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch,
|
||||
)
|
||||
|
||||
tr_loss, logging_loss = 0.0, 0.0
|
||||
model.zero_grad()
|
||||
train_iterator = trange(
|
||||
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
|
||||
)
|
||||
set_seed(args)  # Added here for reproducibility
|
||||
for _ in train_iterator:
|
||||
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
|
||||
for step, batch in enumerate(epoch_iterator):
|
||||
|
||||
# Skip past any already trained steps if resuming training
|
||||
if steps_trained_in_current_epoch > 0:
|
||||
steps_trained_in_current_epoch -= 1
|
||||
continue
|
||||
|
||||
model.train()
|
||||
batch = tuple(t.to(args.device) for t in batch)
|
||||
inputs = {
|
||||
"input_ids": batch[0],
|
||||
"attention_mask": batch[1],
|
||||
"labels": batch[3],
|
||||
}
|
||||
inputs["token_type_ids"] = batch[2]
|
||||
outputs = model(**inputs)
|
||||
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
|
||||
|
||||
if args.n_gpu > 1:
|
||||
loss = loss.mean() # mean() to average on multi-gpu parallel training
|
||||
if args.gradient_accumulation_steps > 1:
|
||||
loss = loss / args.gradient_accumulation_steps
|
||||
|
||||
if args.fp16:
|
||||
with amp.scale_loss(loss, optimizer) as scaled_loss:
|
||||
scaled_loss.backward()
|
||||
else:
|
||||
loss.backward()
|
||||
|
||||
tr_loss += loss.item()
|
||||
if (step + 1) % args.gradient_accumulation_steps == 0:
|
||||
if args.fp16:
|
||||
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
|
||||
else:
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
|
||||
|
||||
optimizer.step()
|
||||
scheduler.step() # Update learning rate schedule
|
||||
model.zero_grad()
|
||||
global_step += 1
|
||||
|
||||
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
|
||||
logs = {}
|
||||
if (
|
||||
args.local_rank == -1 and args.evaluate_during_training
|
||||
): # Only evaluate when single GPU otherwise metrics may not average well
|
||||
results = evaluate(args, model, tokenizer)
|
||||
for key, value in results.items():
|
||||
eval_key = "eval_{}".format(key)
|
||||
logs[eval_key] = value
|
||||
|
||||
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
|
||||
learning_rate_scalar = scheduler.get_lr()[0]
|
||||
logs["learning_rate"] = learning_rate_scalar
|
||||
logs["loss"] = loss_scalar
|
||||
logging_loss = tr_loss
|
||||
|
||||
for key, value in logs.items():
|
||||
tb_writer.add_scalar(key, value, global_step)
|
||||
print(json.dumps({**logs, **{"step": global_step}}))
|
||||
|
||||
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
|
||||
# Save model checkpoint
|
||||
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
|
||||
model_to_save = (
|
||||
model.module if hasattr(model, "module") else model
|
||||
) # Take care of distributed/parallel training
|
||||
model_to_save.save_pretrained(output_dir)
|
||||
tokenizer.save_pretrained(output_dir)
|
||||
|
||||
torch.save(args, os.path.join(output_dir, "training_args.bin"))
|
||||
logger.info("Saving model checkpoint to %s", output_dir)
|
||||
|
||||
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
|
||||
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
|
||||
logger.info("Saving optimizer and scheduler states to %s", output_dir)
|
||||
|
||||
if args.max_steps > 0 and global_step > args.max_steps:
|
||||
epoch_iterator.close()
|
||||
break
|
||||
if args.max_steps > 0 and global_step > args.max_steps:
|
||||
train_iterator.close()
|
||||
break
|
||||
|
||||
if args.local_rank in [-1, 0]:
|
||||
tb_writer.close()
|
||||
|
||||
return global_step, tr_loss / global_step
|
||||
|
||||
|
||||
def evaluate(args, model, tokenizer, prefix="", patience=0):
|
||||
|
||||
if args.model_type == "albert":
|
||||
model.albert.set_regression_threshold(args.regression_threshold)
|
||||
model.albert.set_patience(patience)
|
||||
model.albert.reset_stats()
|
||||
elif args.model_type == "bert":
|
||||
model.bert.set_regression_threshold(args.regression_threshold)
|
||||
model.bert.set_patience(patience)
|
||||
model.bert.reset_stats()
|
||||
else:
|
||||
raise NotImplementedError()
|
||||
|
||||
# Loop to handle MNLI double evaluation (matched, mis-matched)
|
||||
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
|
||||
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
|
||||
|
||||
results = {}
|
||||
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
|
||||
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
|
||||
|
||||
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
|
||||
os.makedirs(eval_output_dir)
|
||||
|
||||
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
|
||||
# Note that DistributedSampler samples randomly
|
||||
eval_sampler = SequentialSampler(eval_dataset)
|
||||
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
|
||||
|
||||
# multi-gpu eval
|
||||
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
|
||||
model = torch.nn.DataParallel(model)
|
||||
|
||||
# Eval!
|
||||
logger.info("***** Running evaluation {} *****".format(prefix))
|
||||
logger.info(" Num examples = %d", len(eval_dataset))
|
||||
logger.info(" Batch size = %d", args.eval_batch_size)
|
||||
eval_loss = 0.0
|
||||
nb_eval_steps = 0
|
||||
preds = None
|
||||
out_label_ids = None
|
||||
for batch in tqdm(eval_dataloader, desc="Evaluating"):
|
||||
model.eval()
|
||||
batch = tuple(t.to(args.device) for t in batch)
|
||||
|
||||
with torch.no_grad():
|
||||
inputs = {
|
||||
"input_ids": batch[0],
|
||||
"attention_mask": batch[1],
|
||||
"labels": batch[3],
|
||||
}
|
||||
inputs["token_type_ids"] = batch[2]
|
||||
outputs = model(**inputs)
|
||||
tmp_eval_loss, logits = outputs[:2]
|
||||
|
||||
eval_loss += tmp_eval_loss.mean().item()
|
||||
nb_eval_steps += 1
|
||||
if preds is None:
|
||||
preds = logits.detach().cpu().numpy()
|
||||
out_label_ids = inputs["labels"].detach().cpu().numpy()
|
||||
else:
|
||||
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
|
||||
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
|
||||
|
||||
eval_loss = eval_loss / nb_eval_steps
|
||||
if args.output_mode == "classification":
|
||||
preds = np.argmax(preds, axis=1)
|
||||
elif args.output_mode == "regression":
|
||||
preds = np.squeeze(preds)
|
||||
result = compute_metrics(eval_task, preds, out_label_ids)
|
||||
results.update(result)
|
||||
|
||||
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
|
||||
with open(output_eval_file, "w") as writer:
|
||||
logger.info("***** Eval results {} *****".format(prefix))
|
||||
for key in sorted(result.keys()):
|
||||
logger.info(" %s = %s", key, str(result[key]))
|
||||
print(" %s = %s" % (key, str(result[key])))
|
||||
writer.write("%s = %s\n" % (key, str(result[key])))
|
||||
|
||||
if args.eval_all_checkpoints and patience != 0:
|
||||
if args.model_type == "albert":
|
||||
model.albert.log_stats()
|
||||
elif args.model_type == "bert":
|
||||
model.bert.log_stats()
|
||||
else:
|
||||
raise NotImplementedError()
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
|
||||
if args.local_rank not in [-1, 0] and not evaluate:
|
||||
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
|
||||
|
||||
processor = processors[task]()
|
||||
output_mode = output_modes[task]
|
||||
# Load data features from cache or dataset file
|
||||
cached_features_file = os.path.join(
|
||||
args.data_dir,
|
||||
"cached_{}_{}_{}_{}".format(
|
||||
"dev" if evaluate else "train",
|
||||
list(filter(None, args.model_name_or_path.split("/"))).pop(),
|
||||
str(args.max_seq_length),
|
||||
str(task),
|
||||
),
|
||||
)
|
||||
if os.path.exists(cached_features_file) and not args.overwrite_cache:
|
||||
logger.info("Loading features from cached file %s", cached_features_file)
|
||||
features = torch.load(cached_features_file)
|
||||
else:
|
||||
logger.info("Creating features from dataset file at %s", args.data_dir)
|
||||
label_list = processor.get_labels()
|
||||
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
|
||||
# HACK(label indices are swapped in RoBERTa pretrained model)
|
||||
label_list[1], label_list[2] = label_list[2], label_list[1]
|
||||
examples = (
|
||||
processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
|
||||
)
|
||||
features = convert_examples_to_features(
|
||||
examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode,
|
||||
)
|
||||
if args.local_rank in [-1, 0]:
|
||||
logger.info("Saving features into cached file %s", cached_features_file)
|
||||
torch.save(features, cached_features_file)
|
||||
|
||||
if args.local_rank == 0 and not evaluate:
|
||||
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
|
||||
|
||||
# Convert to Tensors and build dataset
|
||||
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
|
||||
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
|
||||
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
|
||||
if output_mode == "classification":
|
||||
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
|
||||
elif output_mode == "regression":
|
||||
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
|
||||
|
||||
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
|
||||
return dataset
|
||||
|
||||
|
||||
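# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script), using assumed example
# values: for data_dir="./glue_data/MRPC", model_name_or_path="albert-base-v2",
# max_seq_length=128 and task="mrpc", load_and_cache_examples() above resolves
# the cache file to
#     ./glue_data/MRPC/cached_train_albert-base-v2_128_mrpc   (training split)
#     ./glue_data/MRPC/cached_dev_albert-base-v2_128_mrpc     (evaluation split)
# so a stale cache can be regenerated by passing --overwrite_cache.
# ---------------------------------------------------------------------------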
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name.",
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--patience", default="0", type=str, required=False,
    )
    parser.add_argument(
        "--regression_threshold", default=0, type=float, required=False,
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )

    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=1, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.",
    )
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X update steps.")
    parser.add_argument(
        "--save_steps", type=int, default=500, help="Save checkpoint every X update steps.",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--local_rank", type=int, default=-1, help="For distributed training: local_rank",
    )
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()

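    # Note added for clarity (not in the original script): --patience is kept as a
    # string because it may carry a comma-separated list of patience values to
    # benchmark, e.g. --patience "3,4,5"; the evaluation section below turns it
    # into integers with
    #     patience_list = [int(x) for x in "3,4,5".split(",")]   # -> [3, 4, 5]
    # and the default "0" leaves early exit disabled.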
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    if args.patience != "0" and args.per_gpu_eval_batch_size != 1:
        raise ValueError("The eval batch size must be 1 with PABEE inference on.")

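    # Note added for clarity (not in the original script): PABEE makes its
    # early-exit decision per example, so different examples in one batch could
    # leave the network at different layers; keeping the eval batch size at 1
    # avoids that, which is presumably why the check above enforces it whenever
    # a non-zero patience is requested.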
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
    output_layers_param_num = sum(param.numel() for param in model.classifiers.parameters())
    print("Output Layers Parameters:", output_layers_param_num)
    single_output_layer_param_num = sum(param.numel() for param in model.classifiers[0].parameters())
    print(
        "Added Output Layers Parameters:", output_layers_param_num - single_output_layer_param_num,
    )

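    # Note added for clarity (not in the original script): `model.classifiers`
    # refers to the per-layer classification heads that the PABEE model classes
    # add on top of the backbone (one head per transformer layer is the
    # assumption here), so the last print reports how many parameters the extra
    # heads contribute beyond a single standard output layer.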
logger.info("Training/evaluation parameters %s", args)
|
||||
|
||||
# Training
|
||||
if args.do_train:
|
||||
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
|
||||
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
|
||||
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
|
||||
|
||||
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
|
||||
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
|
||||
logger.info("Saving model checkpoint to %s", args.output_dir)
|
||||
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
|
||||
# They can then be reloaded using `from_pretrained()`
|
||||
model_to_save = (
|
||||
model.module if hasattr(model, "module") else model
|
||||
) # Take care of distributed/parallel training
|
||||
model_to_save.save_pretrained(args.output_dir)
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
|
||||
# Good practice: save your training arguments together with the trained model
|
||||
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
|
||||
|
||||
# Load a trained model and vocabulary that you have fine-tuned
|
||||
model = model_class.from_pretrained(args.output_dir)
|
||||
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
|
||||
model.to(args.device)
|
||||
|
||||
# Evaluation
|
||||
results = {}
|
||||
if args.do_eval and args.local_rank in [-1, 0]:
|
||||
patience_list = [int(x) for x in args.patience.split(",")]
|
||||
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
|
||||
checkpoints = [args.output_dir]
|
||||
if args.eval_all_checkpoints:
|
||||
checkpoints = list(
|
||||
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
|
||||
)
|
||||
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
|
||||
logger.info("Evaluate the following checkpoints: %s", checkpoints)
|
||||
|
||||
for checkpoint in checkpoints:
|
||||
|
||||
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
|
||||
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
|
||||
|
||||
model = model_class.from_pretrained(checkpoint)
|
||||
model.to(args.device)
|
||||
|
||||
print(f"Evaluation for checkpoint {prefix}")
|
||||
for patience in patience_list:
|
||||
result = evaluate(args, model, tokenizer, prefix=prefix, patience=patience)
|
||||
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
|
||||
results.update(result)
|
||||
return results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
examples/bert-loses-patience/test_run_glue_with_pabee.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import argparse
import logging
import sys
import unittest
from unittest.mock import patch

import run_glue_with_pabee


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class PabeeTests(unittest.TestCase):
    def test_run_glue(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        testargs = """
            run_glue_with_pabee.py
            --model_type albert
            --model_name_or_path albert-base-v2
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --task_name mrpc
            --do_train
            --do_eval
            --output_dir ./tests/fixtures/tests_samples/temp_dir
            --per_gpu_train_batch_size=2
            --per_gpu_eval_batch_size=1
            --learning_rate=2e-5
            --max_steps=50
            --warmup_steps=2
            --overwrite_output_dir
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            result = run_glue_with_pabee.main()
            for value in result.values():
                self.assertGreaterEqual(value, 0.75)
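# Illustrative note (not part of the original test file): the test can be run
# with, e.g.,
#     python -m unittest test_run_glue_with_pabee
# assuming the MRPC fixture paths above resolve from the working directory. It
# fine-tunes albert-base-v2 for 50 steps and asserts that every metric returned
# by main() is at least 0.75.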
Some files were not shown because too many files have changed in this diff.