Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-20 17:13:56 +08:00)

Compare commits: v4.0.1-rel...v4.6.1 (1252 commits)
.circleci/config.yml
@@ -68,6 +68,8 @@ jobs:
       - image: circleci/python:3.6
     environment:
       OMP_NUM_THREADS: 1
+      RUN_PT_TF_CROSS_TESTS: yes
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -76,13 +78,45 @@ jobs:
           keys:
             - v0.4-torch_and_tf-{{ checksum "setup.py" }}
             - v0.4-{{ checksum "setup.py" }}
+      - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
       - run: pip install --upgrade pip
-      - run: pip install .[sklearn,tf-cpu,torch,testing]
+      - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,speech,vision]
+      - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
       - save_cache:
           key: v0.4-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
-      - run: RUN_PT_TF_CROSS_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf ./tests/ -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
+      - run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf ./tests/ -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
       - store_artifacts:
           path: ~/transformers/tests_output.txt
       - store_artifacts:
           path: ~/transformers/reports
+
+  run_tests_torch_and_flax:
+    working_directory: ~/transformers
+    docker:
+      - image: circleci/python:3.6
+    environment:
+      OMP_NUM_THREADS: 1
+      RUN_PT_FLAX_CROSS_TESTS: yes
+      TRANSFORMERS_IS_CI: yes
+    resource_class: xlarge
+    parallelism: 1
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - v0.4-torch_and_flax-{{ checksum "setup.py" }}
+            - v0.4-{{ checksum "setup.py" }}
+      - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
+      - run: pip install --upgrade pip
+      - run: pip install .[sklearn,flax,torch,testing,sentencepiece,speech,vision]
+      - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
+      - save_cache:
+          key: v0.4-{{ checksum "setup.py" }}
+          paths:
+            - '~/.cache/pip'
+      - run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax ./tests/ -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
+      - store_artifacts:
+          path: ~/transformers/tests_output.txt
+      - store_artifacts:
@@ -94,6 +128,7 @@ jobs:
       - image: circleci/python:3.7
     environment:
       OMP_NUM_THREADS: 1
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -102,13 +137,15 @@ jobs:
           keys:
             - v0.4-torch-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
+      - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
       - run: pip install --upgrade pip
-      - run: pip install .[sklearn,torch,testing]
+      - run: pip install .[sklearn,torch,testing,sentencepiece,speech,vision]
+      - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
       - save_cache:
           key: v0.4-torch-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
-      - run: python -m pytest -n 8 --dist=loadfile -s --make-reports=tests_torch ./tests/ | tee tests_output.txt
+      - run: python -m pytest -n 3 --dist=loadfile -s --make-reports=tests_torch ./tests/ | tee tests_output.txt
       - store_artifacts:
           path: ~/transformers/tests_output.txt
       - store_artifacts:
@@ -120,6 +157,7 @@ jobs:
       - image: circleci/python:3.7
     environment:
       OMP_NUM_THREADS: 1
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -129,7 +167,7 @@ jobs:
            - v0.4-tf-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install .[sklearn,tf-cpu,testing]
+      - run: pip install .[sklearn,tf-cpu,testing,sentencepiece]
       - save_cache:
           key: v0.4-tf-{{ checksum "setup.py" }}
           paths:
@@ -146,6 +184,7 @@ jobs:
       - image: circleci/python:3.7
     environment:
       OMP_NUM_THREADS: 1
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -155,7 +194,7 @@ jobs:
            - v0.4-flax-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: sudo pip install .[flax,sklearn,torch,testing]
+      - run: sudo pip install .[flax,testing,sentencepiece]
       - save_cache:
           key: v0.4-flax-{{ checksum "setup.py" }}
           paths:
@@ -172,6 +211,8 @@ jobs:
       - image: circleci/python:3.7
     environment:
       OMP_NUM_THREADS: 1
+      RUN_PIPELINE_TESTS: yes
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -180,13 +221,15 @@ jobs:
           keys:
             - v0.4-torch-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
+      - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
       - run: pip install --upgrade pip
-      - run: pip install .[sklearn,torch,testing]
+      - run: pip install .[sklearn,torch,testing,sentencepiece,speech,vision]
+      - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cpu.html
       - save_cache:
           key: v0.4-torch-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
-      - run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test ./tests/ | tee tests_output.txt
+      - run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test ./tests/ | tee tests_output.txt
       - store_artifacts:
           path: ~/transformers/tests_output.txt
       - store_artifacts:
@@ -198,6 +241,8 @@ jobs:
       - image: circleci/python:3.7
     environment:
       OMP_NUM_THREADS: 1
+      RUN_PIPELINE_TESTS: yes
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -207,12 +252,12 @@ jobs:
            - v0.4-tf-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install .[sklearn,tf-cpu,testing]
+      - run: pip install .[sklearn,tf-cpu,testing,sentencepiece]
       - save_cache:
           key: v0.4-tf-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
-      - run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf ./tests/ -m is_pipeline_test | tee tests_output.txt
+      - run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf ./tests/ -m is_pipeline_test | tee tests_output.txt
       - store_artifacts:
           path: ~/transformers/tests_output.txt
       - store_artifacts:
@@ -221,9 +266,10 @@ jobs:
   run_tests_custom_tokenizers:
     working_directory: ~/transformers
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.7
     environment:
       RUN_CUSTOM_TOKENIZERS: yes
+      TRANSFORMERS_IS_CI: yes
     steps:
       - checkout
       - restore_cache:
@@ -231,7 +277,7 @@ jobs:
            - v0.4-custom_tokenizers-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install .[ja,testing]
+      - run: pip install .[ja,testing,sentencepiece,jieba]
       - run: python -m unidic download
       - save_cache:
           key: v0.4-custom_tokenizers-{{ checksum "setup.py" }}
@@ -249,6 +295,7 @@ jobs:
       - image: circleci/python:3.6
     environment:
       OMP_NUM_THREADS: 1
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -258,18 +305,46 @@ jobs:
            - v0.4-torch_examples-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
-      - run: pip install .[sklearn,torch,testing]
-      - run: pip install -r examples/requirements.txt
+      - run: pip install .[sklearn,torch,sentencepiece,testing]
+      - run: pip install -r examples/pytorch/_tests_requirements.txt
       - save_cache:
           key: v0.4-torch_examples-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
-      - run: python -m pytest -n 8 --dist=loadfile -s --make-reports=examples_torch ./examples/ | tee examples_output.txt
+      - run: TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee examples_output.txt
       - store_artifacts:
           path: ~/transformers/examples_output.txt
       - store_artifacts:
           path: ~/transformers/reports
+
+  run_tests_hub:
+    working_directory: ~/transformers
+    docker:
+      - image: circleci/python:3.7
+    environment:
+      HUGGINGFACE_CO_STAGING: yes
+      RUN_GIT_LFS_TESTS: yes
+      TRANSFORMERS_IS_CI: yes
+    resource_class: xlarge
+    parallelism: 1
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - v0.4-hub-{{ checksum "setup.py" }}
+            - v0.4-{{ checksum "setup.py" }}
+      - run: sudo apt-get install git-lfs
+      - run: |
+          git config --global user.email "ci@dummy.com"
+          git config --global user.name "ci"
+      - run: pip install --upgrade pip
+      - run: pip install .[torch,sentencepiece,testing]
+      - save_cache:
+          key: v0.4-hub-{{ checksum "setup.py" }}
+          paths:
+            - '~/.cache/pip'
+      - run: python -m pytest -sv ./tests/ -m is_staging_test
 
   build_doc:
     working_directory: ~/transformers
     docker:
@@ -280,13 +355,14 @@ jobs:
           keys:
             - v0.4-build_doc-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
+      - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
       - run: pip install --upgrade pip
-      - run: pip install ."[all, docs]"
+      - run: pip install ."[docs]"
       - save_cache:
           key: v0.4-build_doc-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
-      - run: cd docs && make html SPHINXOPTS="-W"
+      - run: cd docs && make html SPHINXOPTS="-W -j 4"
       - store_artifacts:
           path: ./docs/_build
@@ -303,7 +379,9 @@ jobs:
           keys:
             - v0.4-deploy_doc-{{ checksum "setup.py" }}
            - v0.4-{{ checksum "setup.py" }}
-      - run: pip install ."[all,docs]"
+      - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
+      - run: pip install --upgrade pip
+      - run: pip install ."[docs]"
       - save_cache:
           key: v0.4-deploy_doc-{{ checksum "setup.py" }}
           paths:
@@ -315,6 +393,8 @@ jobs:
     docker:
       - image: circleci/python:3.6
+    resource_class: medium
     environment:
+      TRANSFORMERS_IS_CI: yes
     parallelism: 1
     steps:
       - checkout
@@ -324,18 +404,21 @@ jobs:
            - v0.4-{{ checksum "setup.py" }}
       - run: pip install --upgrade pip
       - run: pip install isort
-      - run: pip install .[tf,torch,flax,quality]
+      - run: pip install .[all,quality]
       - save_cache:
           key: v0.4-code_quality-{{ checksum "setup.py" }}
           paths:
             - '~/.cache/pip'
       - run: black --check examples tests src utils
       - run: isort --check-only examples tests src utils
+      - run: python utils/custom_init_isort.py --check_only
       - run: flake8 examples tests src utils
       - run: python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
       - run: python utils/check_copies.py
+      - run: python utils/check_table.py
       - run: python utils/check_dummies.py
       - run: python utils/check_repo.py
+      - run: python utils/check_inits.py
 
   check_repository_consistency:
     working_directory: ~/transformers
@@ -354,6 +437,7 @@ jobs:
       - image: circleci/python:3.6
     environment:
       OMP_NUM_THREADS: 1
+      TRANSFORMERS_IS_CI: yes
     resource_class: xlarge
     parallelism: 1
     steps:
@@ -392,22 +476,24 @@ workflows:
       - run_examples_torch
       - run_tests_custom_tokenizers
       - run_tests_torch_and_tf
+      - run_tests_torch_and_flax
       - run_tests_torch
       - run_tests_tf
       - run_tests_flax
       - run_tests_pipelines_torch
       - run_tests_pipelines_tf
+      - run_tests_hub
       - build_doc
       - deploy_doc: *workflow_filters
-  tpu_testing_jobs:
-    triggers:
-      - schedule:
-          # Set to run at the first minute of every hour.
-          cron: "0 8 * * *"
-          filters:
-            branches:
-              only:
-                - master
-    jobs:
-      - cleanup-gke-jobs
-      - run_examples_tpu
+  # tpu_testing_jobs:
+  #   triggers:
+  #     - schedule:
+  #         # Set to run at the first minute of every hour.
+  #         cron: "0 8 * * *"
+  #         filters:
+  #           branches:
+  #             only:
+  #               - master
+  #   jobs:
+  #     - cleanup-gke-jobs
+  #     - run_examples_tpu
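A note on the recurring test commands in this config: `-n 8 --dist=loadfile` comes from the pytest-xdist plugin (eight worker processes, with all tests from a given file kept on the same worker so module-level fixtures stay valid), while `--make-reports` is an option defined by this repository's own test conftest rather than a stock pytest flag. A minimal local equivalent of the PT/TF cross-test step, assuming pytest-xdist is installed and with `--make-reports` omitted:

    # Run the PT/TF cross tests on 8 workers, grouping tests by file.
    python -m pytest -n 8 --dist=loadfile -rA -s ./tests/ -m is_pt_tf_cross_test | tee tests_output.txt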
.circleci/deploy.sh
@@ -3,6 +3,7 @@ cd docs
 function deploy_doc(){
 	echo "Creating doc at commit $1 and pushing to folder $2"
 	git checkout $1
+	pip install -U ..
 	if [ ! -z "$2" ]
 	then
 		if [ "$2" == "master" ]; then
@@ -45,11 +46,20 @@ deploy_doc "6f5a12a" v2.7.0
 deploy_doc "11c3257" v2.8.0
 deploy_doc "e7cfc1a" v2.9.0
 deploy_doc "7cb203f" v2.9.1
 deploy_doc "10d7239" v2.10.0
 deploy_doc "b42586e" v2.11.0
 deploy_doc "7fb8bdf" v3.0.2
 deploy_doc "4b3ee9c" v3.1.0
 deploy_doc "3ebb1b3" v3.2.0
 deploy_doc "0613f05" v3.3.1
 deploy_doc "eb0e0ce" v3.4.0
-deploy_doc "818878d" # v3.5.1 Latest stable release
+deploy_doc "818878d" v3.5.1
+deploy_doc "c781171" v4.0.1
+deploy_doc "bfa4ccf" v4.1.1
+deploy_doc "7d9a9d0" v4.2.2
+deploy_doc "bae0c79" v4.3.3
+deploy_doc "c988db5" v4.4.0
+deploy_doc "c5d6a28" v4.4.1
+deploy_doc "6bc89ed" v4.4.2
+deploy_doc "4906a29" v4.5.0
+deploy_doc "4bae96e" # v4.5.1 Latest stable release
.gitattributes (new file)
@@ -0,0 +1,3 @@
+*.py eol=lf
+*.rst eol=lf
+*.md eol=lf
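These three attributes force LF line endings for Python, reST, and Markdown sources regardless of the contributor's platform. The effect can be checked per file with a standard Git query (the target file here is just an illustration):

    git check-attr eol -- setup.py    # prints: setup.py: eol: lf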
.github/ISSUE_TEMPLATE/bug-report.md (67 lines changed)
@@ -11,7 +11,7 @@ assignees: ''
 ## Environment info
 <!-- You can run the command `transformers-cli env` and copy-and-paste its output below.
      Don't forget to fill out the missing fields in that output! -->
 
 - `transformers` version:
 - Platform:
 - Python version:
@@ -24,32 +24,45 @@ assignees: ''
 <!-- Your issue will be replied to more quickly if you can figure out the right person to tag with @
 If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
 Please tag fewer than 3 people.
 
-albert, bert, GPT2, XLM: @LysandreJik
-tokenizers: @mfuntowicz
-Trainer: @sgugger
-Speed and Memory Benchmarks: @patrickvonplaten
-Model Cards: @julien-c
-TextGeneration: @TevenLeScao
-examples/distillation: @VictorSanh
-nlp datasets: [different repo](https://github.com/huggingface/nlp)
-rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
-Text Generation: @patrickvonplaten @TevenLeScao
-Blenderbot: @patrickvonplaten
-Bart: @patrickvonplaten
-Marian: @patrickvonplaten
-Pegasus: @patrickvonplaten
-mBART: @patrickvonplaten
-T5: @patrickvonplaten
-Longformer/Reformer: @patrickvonplaten
-TransfoXL/XLNet: @TevenLeScao
-RAG: @patrickvonplaten, @lhoestq
-FSMT: @stas00
-examples/seq2seq: @patil-suraj
-examples/bert-loses-patience: @JetRunner
-tensorflow: @jplu
-examples/token-classification: @stefan-it
-documentation: @sgugger
+Models:
+
+- albert, bert, xlm: @LysandreJik
+- blenderbot, bart, marian, pegasus, encoderdecoder, t5: @patrickvonplaten, @patil-suraj
+- longformer, reformer, transfoxl, xlnet: @patrickvonplaten
+- fsmt: @stas00
+- funnel: @sgugger
+- gpt2: @patrickvonplaten, @LysandreJik
+- rag: @patrickvonplaten, @lhoestq
+- tensorflow: @Rocketknight1
+
+Library:
+
+- benchmarks: @patrickvonplaten
+- deepspeed: @stas00
+- ray/raytune: @richardliaw, @amogkam
+- text generation: @patrickvonplaten
+- tokenizers: @LysandreJik
+- trainer: @sgugger
+- pipelines: @LysandreJik
+
+Documentation: @sgugger
+
+Model hub:
+
+- for issues with a model report at https://discuss.huggingface.co/ and tag the model's creator.
+
+HF projects:
+
+- datasets: [different repo](https://github.com/huggingface/datasets)
+- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
+
+Examples:
+
+- maintained examples (not research project or legacy): @sgugger, @patil-suraj
+- research_projects/bert-loses-patience: @JetRunner
+- research_projects/distillation: @VictorSanh
 
 -->
 
 ## Information
.github/PULL_REQUEST_TEMPLATE.md (58 lines changed)
@@ -30,33 +30,45 @@ Fixes # (issue)
 ## Who can review?
 
 Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
-members/contributors which may be interested in your PR.
+members/contributors who may be interested in your PR.
 
 <!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
 
 If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
 Please tag fewer than 3 people.
 
-albert, bert, XLM: @LysandreJik
-GPT2: @LysandreJik, @patrickvonplaten
-tokenizers: @mfuntowicz
-Trainer: @sgugger
-Benchmarks: @patrickvonplaten
-Model Cards: @julien-c
-examples/distillation: @VictorSanh
-nlp datasets: [different repo](https://github.com/huggingface/nlp)
-rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
-Text Generation: @patrickvonplaten, @TevenLeScao
-Blenderbot, Bart, Marian, Pegasus: @patrickvonplaten
-T5: @patrickvonplaten
-Rag: @patrickvonplaten, @lhoestq
-EncoderDecoder: @patrickvonplaten
-Longformer, Reformer: @patrickvonplaten
-TransfoXL, XLNet: @TevenLeScao, @patrickvonplaten
-examples/seq2seq: @patil-suraj
-examples/bert-loses-patience: @JetRunner
-tensorflow: @jplu
-examples/token-classification: @stefan-it
-documentation: @sgugger
-FSTM: @stas00
+Models:
+
+- albert, bert, xlm: @LysandreJik
+- blenderbot, bart, marian, pegasus, encoderdecoder, t5: @patrickvonplaten, @patil-suraj
+- longformer, reformer, transfoxl, xlnet: @patrickvonplaten
+- fsmt: @stas00
+- funnel: @sgugger
+- gpt2: @patrickvonplaten, @LysandreJik
+- rag: @patrickvonplaten, @lhoestq
+- tensorflow: @LysandreJik
+
+Library:
+
+- benchmarks: @patrickvonplaten
+- deepspeed: @stas00
+- ray/raytune: @richardliaw, @amogkam
+- text generation: @patrickvonplaten
+- tokenizers: @n1t0, @LysandreJik
+- trainer: @sgugger
+- pipelines: @LysandreJik
+
+Documentation: @sgugger
+
+HF projects:
+
+- datasets: [different repo](https://github.com/huggingface/datasets)
+- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
+
+Examples:
+
+- maintained examples (not research project or legacy): @sgugger, @patil-suraj
+- research_projects/bert-loses-patience: @JetRunner
+- research_projects/distillation: @VictorSanh
 
 -->
.github/conda/build.sh (new file)
@@ -0,0 +1 @@
+$PYTHON setup.py install  # Python command to install the script.
.github/conda/meta.yaml (new file)
@@ -0,0 +1,52 @@
+{% set name = "transformers" %}
+
+package:
+  name: "{{ name|lower }}"
+  version: "{{ TRANSFORMERS_VERSION }}"
+
+source:
+  path: ../../
+
+build:
+  noarch: python
+
+requirements:
+  host:
+    - python
+    - pip
+    - numpy >=1.17
+    - dataclasses
+    - importlib_metadata
+    - huggingface_hub
+    - packaging
+    - filelock
+    - requests
+    - tqdm >=4.27
+    - sacremoses
+    - regex !=2019.12.17
+    - protobuf
+    - tokenizers >=0.10.1,<0.11.0
+  run:
+    - python
+    - numpy >=1.17
+    - dataclasses
+    - importlib_metadata
+    - huggingface_hub
+    - packaging
+    - filelock
+    - requests
+    - tqdm >=4.27
+    - sacremoses
+    - regex !=2019.12.17
+    - protobuf
+    - tokenizers >=0.10.1,<0.11.0
+
+test:
+  imports:
+    - transformers
+
+about:
+  home: https://huggingface.co
+  license: Apache License 2.0
+  license_file: LICENSE
+  summary: "🤗Transformers: State-of-the-art Natural Language Processing for Pytorch and TensorFlow 2.0."
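The `{{ TRANSFORMERS_VERSION }}` Jinja variable is not defined by the recipe itself; the release workflow further down exports it before invoking conda-build. A rough local sketch of the same flow, assuming conda-build is installed and the commands run from the repository root:

    # Export the version the recipe templates on, then build the noarch package.
    export TRANSFORMERS_VERSION=$(python setup.py --version)
    conda-build .github/conda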
.github/stale.yml (deleted file)
@@ -1,17 +0,0 @@
-# Number of days of inactivity before an issue becomes stale
-daysUntilStale: 60
-# Number of days of inactivity before a stale issue is closed
-daysUntilClose: 7
-# Issues with these labels will never be considered stale
-exemptLabels:
-  - pinned
-  - security
-# Label to use when marking an issue as stale
-staleLabel: wontfix
-# Comment to post when marking an issue as stale. Set to `false` to disable
-markComment: >
-  This issue has been automatically marked as stale because it has not had
-  recent activity. It will be closed if no further activity occurs. Thank you
-  for your contributions.
-# Comment to post when closing a stale issue. Set to `false` to disable
-closeComment: false
.github/workflows/github-torch-hub.yml (8 lines changed)
@@ -1,6 +1,6 @@
 name: Torch hub integration
 
-on:
+on:
   push:
     branches:
       - "*"
@@ -32,8 +32,10 @@ jobs:
       - name: Install dependencies
         run: |
           pip install --upgrade pip
-          pip install torch
-          pip install numpy filelock protobuf requests tqdm regex sentencepiece sacremoses tokenizers packaging
+          # install torch-hub specific dependencies
+          pip install -e git+https://github.com/huggingface/transformers.git#egg=transformers[torchhub]
+          # no longer needed
+          pip uninstall -y transformers
 
       - name: Torch hub list
         run: |
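The "Torch hub list" step exercises the repository's `hubconf.py` through the standard torch.hub API; presumably a check of roughly this shape (a sketch, assuming torch is installed and network access is available):

    # List the entry points transformers exposes to torch.hub.
    python -c "import torch; print(torch.hub.list('huggingface/transformers'))"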
.github/workflows/model-templates.yml (new file)
@@ -0,0 +1,73 @@
+name: Model templates runner
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/**"
+      - "templates/**"
+    types: [assigned, opened, synchronize, reopened]
+
+jobs:
+  run_tests_templates:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v1
+
+      - name: Install Python
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.6
+
+      - name: Loading cache.
+        uses: actions/cache@v2
+        id: cache
+        with:
+          path: ~/.cache/pip
+          key: v1.2-tests_templates
+          restore-keys: |
+            v1.2-tests_templates-${{ hashFiles('setup.py') }}
+            v1.2-tests_templates
+
+      - name: Install dependencies
+        run: |
+          pip install --upgrade pip
+          sudo apt -y update && sudo apt install -y libsndfile1-dev
+          pip install .[dev]
+      - name: Create model files
+        run: |
+          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json --path=templates/adding_a_new_model
+          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
+          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/standalone.json --path=templates/adding_a_new_model
+          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/tf-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
+          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/tf-seq-2-seq-bart-tokenizer.json --path=templates/adding_a_new_model
+          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/pt-seq-2-seq-bart-tokenizer.json --path=templates/adding_a_new_model
+          make style
+          python utils/check_table.py --fix_and_overwrite
+          python utils/check_dummies.py --fix_and_overwrite
+          python utils/check_copies.py --fix_and_overwrite
+
+      - name: Run all non-slow tests
+        run: |
+          python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_templates tests/*template*
+
+      - name: Run style changes
+        run: |
+          git fetch origin master:master
+          make fixup
+
+      - name: Failure short reports
+        if: ${{ always() }}
+        run: cat reports/tests_templates_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: run_all_tests_templates_test_reports
+          path: reports
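The templates job can be reproduced locally; a condensed sketch using one of the template configurations exercised above (assumes a dev install of the repository, and omits the repo-specific `--make-reports` option):

    pip install .[dev]
    transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json --path=templates/adding_a_new_model
    python -m pytest -n 2 --dist=loadfile -s tests/*template*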
.github/workflows/release-conda.yml (new file)
@@ -0,0 +1,45 @@
+name: Release - Conda
+
+on:
+  push:
+    tags:
+      - v*
+
+env:
+  ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_API_TOKEN }}
+
+jobs:
+  build_and_package:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        shell: bash -l {0}
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v1
+
+      - name: Install miniconda
+        uses: conda-incubator/setup-miniconda@v2
+        with:
+          auto-update-conda: true
+          auto-activate-base: false
+          python-version: 3.8
+          activate-environment: "build-transformers"
+          channels: huggingface
+
+      - name: Setup conda env
+        run: |
+          conda install -c defaults anaconda-client conda-build
+
+      - name: Extract version
+        run: echo "TRANSFORMERS_VERSION=`python setup.py --version`" >> $GITHUB_ENV
+
+      - name: Build conda packages
+        run: |
+          conda info
+          conda list
+          conda-build .github/conda
+
+      - name: Upload to Anaconda
+        run: anaconda upload `conda-build .github/conda --output` --force
.github/workflows/self-push.yml (326 lines changed)
@@ -4,148 +4,98 @@ on:
   push:
     branches:
       - master
+      - model-templates
       - ci_*
+      - ci-*
     paths:
       - "src/**"
       - "tests/**"
       - ".github/**"
       - "templates/**"
   # pull_request:
   repository_dispatch:
 
+env:
+  HF_HOME: /mnt/cache
+  TRANSFORMERS_IS_CI: yes
+  OMP_NUM_THREADS: 8
+  MKL_NUM_THREADS: 8
+
 jobs:
   run_tests_torch_gpu:
-    runs-on: [self-hosted, gpu, single-gpu]
+    runs-on: [self-hosted, docker-gpu, single-gpu]
+    container:
+      image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-      - uses: actions/checkout@v2
-      - name: Python version
+      - name: Launcher docker
+        uses: actions/checkout@v2
+
+      - name: NVIDIA-SMI
         run: |
-          which python
-          python --version
-          pip --version
-
-      - name: Current dir
-        run: pwd
-      - run: nvidia-smi
-
-      - name: Loading cache.
-        uses: actions/cache@v2
-        id: cache
-        with:
-          path: .env
-          key: v1.1-tests_torch_gpu-${{ hashFiles('setup.py') }}
-
-      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-        run: |
-          python -m venv .env
-          source .env/bin/activate
-          which python
-          python --version
-          pip --version
+          nvidia-smi
 
       - name: Install dependencies
         run: |
-          source .env/bin/activate
+          apt -y update && apt install -y libsndfile1-dev
           pip install --upgrade pip
-          pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
-          pip install git+https://github.com/huggingface/datasets
+          pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
 
       - name: Are GPUs recognized by our DL frameworks
         run: |
-          source .env/bin/activate
           python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
           python -c "import torch; print('Cuda version:', torch.version.cuda)"
           python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
           python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
 
-      - name: Create model files
-        run: |
-          source .env/bin/activate
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json --path=templates/adding_a_new_model
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/standalone.json --path=templates/adding_a_new_model
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/tf-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
-
       - name: Run all non-slow tests on GPU
         env:
           OMP_NUM_THREADS: 1
           CUDA_VISIBLE_DEVICES: 0
         run: |
-          source .env/bin/activate
-          python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_torch_gpu tests
+          python -m pytest -n 2 --dist=loadfile --make-reports=tests_torch_gpu tests
 
       - name: Failure short reports
         if: ${{ always() }}
         run: cat reports/tests_torch_gpu_failures_short.txt
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
         uses: actions/upload-artifact@v2
         with:
           name: run_all_tests_torch_gpu_test_reports
           path: reports
 
   run_tests_tf_gpu:
-    runs-on: [self-hosted, gpu, single-gpu]
+    runs-on: [self-hosted, docker-gpu, single-gpu]
+    timeout-minutes: 120
+    container:
+      image: tensorflow/tensorflow:2.4.1-gpu
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-      - uses: actions/checkout@v2
-      - name: Python version
-        run: |
-          which python
-          python --version
-          pip --version
-      - name: Current dir
-        run: pwd
-      - run: nvidia-smi
+      - name: Launcher docker
+        uses: actions/checkout@v2
 
-      - name: Loading cache.
-        uses: actions/cache@v2
-        id: cache
-        with:
-          path: .env
-          key: v1.1-tests_tf_gpu-${{ hashFiles('setup.py') }}
-
-      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
+      - name: NVIDIA-SMI
         run: |
-          python -m venv .env
-          source .env/bin/activate
-          which python
-          python --version
-          pip --version
+          nvidia-smi
 
       - name: Install dependencies
         run: |
-          source .env/bin/activate
           pip install --upgrade pip
-          pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
-          pip install git+https://github.com/huggingface/datasets
+          pip install .[sklearn,testing,onnxruntime,sentencepiece]
 
       - name: Are GPUs recognized by our DL frameworks
         run: |
-          source .env/bin/activate
           TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
           TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
 
-      - name: Create model files
-        run: |
-          source .env/bin/activate
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json --path=templates/adding_a_new_model
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/pt-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/standalone.json --path=templates/adding_a_new_model
-          transformers-cli add-new-model --testing --testing_file=templates/adding_a_new_model/tests/tf-encoder-bert-tokenizer.json --path=templates/adding_a_new_model
-
       - name: Run all non-slow tests on GPU
         env:
           OMP_NUM_THREADS: 1
           CUDA_VISIBLE_DEVICES: 0
+          TF_NUM_INTRAOP_THREADS: 8
+          TF_NUM_INTEROP_THREADS: 1
         run: |
-          source .env/bin/activate
-          python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_tf_gpu tests
+          python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_gpu tests
 
       - name: Failure short reports
         if: ${{ always() }}
         run: cat reports/tests_tf_gpu_failures_short.txt
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
         uses: actions/upload-artifact@v2
@@ -153,57 +103,42 @@ jobs:
           name: run_all_tests_tf_gpu_test_reports
           path: reports
 
   run_tests_torch_multi_gpu:
-    runs-on: [self-hosted, gpu, multi-gpu]
+    runs-on: [self-hosted, docker-gpu, multi-gpu]
+    container:
+      image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-      - uses: actions/checkout@v2
-      - name: Python version
+      - name: Launcher docker
+        uses: actions/checkout@v2
+
+      - name: NVIDIA-SMI
         run: |
-          which python
-          python --version
-          pip --version
           nvidia-smi
 
-      - name: Current dir
-        run: pwd
-      - run: nvidia-smi
-
-      - name: Loading cache.
-        uses: actions/cache@v2
-        id: cache
-        with:
-          path: .env
-          key: v1.1-tests_torch_multi_gpu-${{ hashFiles('setup.py') }}
-
-      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-        run: |
-          python -m venv .env
-          source .env/bin/activate
-          which python
-          python --version
-          pip --version
       - name: Install dependencies
         run: |
-          source .env/bin/activate
+          apt -y update && apt install -y libsndfile1-dev
           pip install --upgrade pip
-          pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
-          pip install git+https://github.com/huggingface/datasets
+          pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
 
       - name: Are GPUs recognized by our DL frameworks
         run: |
-          source .env/bin/activate
           python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
           python -c "import torch; print('Cuda version:', torch.version.cuda)"
           python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
           python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
 
       - name: Run all non-slow tests on GPU
         env:
           OMP_NUM_THREADS: 1
+          MKL_SERVICE_FORCE_INTEL: 1
         run: |
-          source .env/bin/activate
-          python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_torch_multi_gpu tests
+          python -m pytest -n 2 --dist=loadfile --make-reports=tests_torch_multi_gpu tests
 
       - name: Failure short reports
         if: ${{ always() }}
         run: cat reports/tests_torch_multi_gpu_failures_short.txt
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
@@ -213,52 +148,35 @@ jobs:
           path: reports
 
   run_tests_tf_multi_gpu:
-    runs-on: [self-hosted, gpu, multi-gpu]
+    runs-on: [self-hosted, docker-gpu, multi-gpu]
+    timeout-minutes: 120
+    container:
+      image: tensorflow/tensorflow:2.4.1-gpu
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
-      - uses: actions/checkout@v2
-      - name: Python version
+      - name: Launcher docker
+        uses: actions/checkout@v2
+
+      - name: NVIDIA-SMI
         run: |
-          which python
-          python --version
-          pip --version
           nvidia-smi
 
-      - name: Current dir
-        run: pwd
-      - run: nvidia-smi
-
-      - name: Loading cache.
-        uses: actions/cache@v2
-        id: cache
-        with:
-          path: .env
-          key: v1.1-tests_tf_multi_gpu-${{ hashFiles('setup.py') }}
-
-      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-        run: |
-          python -m venv .env
-          source .env/bin/activate
-          which python
-          python --version
-          pip --version
       - name: Install dependencies
         run: |
-          source .env/bin/activate
           pip install --upgrade pip
-          pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
-          pip install git+https://github.com/huggingface/datasets
+          pip install .[sklearn,testing,onnxruntime,sentencepiece]
 
       - name: Are GPUs recognized by our DL frameworks
         run: |
-          source .env/bin/activate
           TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
|
||||
TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
|
||||
|
||||
- name: Run all non-slow tests on GPU
|
||||
env:
|
||||
OMP_NUM_THREADS: 1
|
||||
TF_NUM_INTRAOP_THREADS: 8
|
||||
TF_NUM_INTEROP_THREADS: 1
|
||||
run: |
|
||||
source .env/bin/activate
|
||||
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_tf_multi_gpu tests
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_multi_gpu tests
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
@ -270,4 +188,112 @@ jobs:
|
||||
with:
|
||||
name: run_all_tests_tf_multi_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
run_tests_torch_cuda_extensions_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, single-gpu]
|
||||
container:
|
||||
image: nvcr.io/nvidia/pytorch:21.03-py3
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt -y update && apt install -y libaio-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[testing,deepspeed]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
- name: Run all tests on GPU
|
||||
run: |
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: run_tests_torch_cuda_extensions_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
run_tests_torch_cuda_extensions_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
container:
|
||||
image: nvcr.io/nvidia/pytorch:21.03-py3
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Launcher docker
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
apt -y update && apt install -y libaio-dev
|
||||
pip install --upgrade pip
|
||||
pip install .[testing,deepspeed,fairscale]
|
||||
|
||||
- name: Are GPUs recognized by our DL frameworks
|
||||
run: |
|
||||
python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
|
||||
python -c "import torch; print('Cuda version:', torch.version.cuda)"
|
||||
python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
|
||||
python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
|
||||
|
||||
- name: Run all tests on GPU
|
||||
run: |
|
||||
python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_multi_gpu tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ always() }}
|
||||
run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt
|
||||
|
||||
- name: Test suite reports artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
|
||||
path: reports
|
||||
|
||||
|
||||
send_results:
|
||||
name: Send results to webhook
|
||||
runs-on: ubuntu-latest
|
||||
if: always()
|
||||
needs: [
|
||||
run_tests_torch_gpu,
|
||||
run_tests_tf_gpu,
|
||||
run_tests_torch_multi_gpu,
|
||||
run_tests_tf_multi_gpu,
|
||||
run_tests_torch_cuda_extensions_gpu,
|
||||
run_tests_torch_cuda_extensions_multi_gpu
|
||||
]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: actions/download-artifact@v2
|
||||
|
||||
- name: Send message to Slack
|
||||
env:
|
||||
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
|
||||
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
|
||||
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
python utils/notification_service.py push
|
||||
|
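For context, `utils/notification_service.py` is the script that turns the uploaded report artifacts into a Slack message, using the `CI_SLACK_BOT_TOKEN` and `CI_SLACK_CHANNEL_ID` secrets exported above. As a rough, illustrative sketch only (the real script has its own interface; the message text and argument handling below are assumptions), posting with `slack_sdk` looks like:

```python
# Illustrative sketch only -- not the actual utils/notification_service.py.
# The env var names match the workflow above; everything else is assumed.
import os
import sys

from slack_sdk import WebClient

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])

# e.g. "push" or "scheduled", as passed by the workflows
ci_event = sys.argv[1] if len(sys.argv) > 1 else "push"

client.chat_postMessage(
    channel=os.environ["CI_SLACK_CHANNEL_ID"],
    text=f"Self-hosted runner ({ci_event}) CI finished; see the uploaded test reports.",
)
```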
386  .github/workflows/self-scheduled.yml  vendored
@@ -1,86 +1,66 @@
-# configuration notes:
-#
-# - `source .env/bin/activate` currently needs to be run first thing in each step. Otherwise
-#   the step uses the system-wide python interpreter.
-
 name: Self-hosted runner (scheduled)

 on:
   push:
     branches:
       - ci_*
-      - framework-agnostic-tokenizers
+      - multi_ci_*
   repository_dispatch:
   schedule:
     - cron: "0 0 * * *"

+env:
+  HF_HOME: /mnt/cache
+  TRANSFORMERS_IS_CI: yes
+  RUN_SLOW: yes
+  OMP_NUM_THREADS: 16
+  MKL_NUM_THREADS: 16
+
 jobs:
   run_all_tests_torch_gpu:
-    runs-on: [self-hosted, gpu, single-gpu]
+    runs-on: [self-hosted, docker-gpu, single-gpu]
+    container:
+      image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-    - uses: actions/checkout@v2
+    - name: Launcher docker
+      uses: actions/checkout@v2

-    - name: Loading cache.
-      uses: actions/cache@v2
-      id: cache
-      with:
-        path: .env
-        key: v1.1-slow_tests_torch_gpu-${{ hashFiles('setup.py') }}

-    - name: Python version
+    - name: NVIDIA-SMI
       run: |
-        which python
-        python --version
-        pip --version

-    - name: Current dir
-      run: pwd
-    - run: nvidia-smi

-    - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-      if: steps.cache.outputs.cache-hit != 'true'
-      run: |
-        python -m venv .env
-        source .env/bin/activate
-        which python
-        python --version
-        pip --version
+        nvidia-smi

     - name: Install dependencies
       run: |
-        source .env/bin/activate
+        apt -y update && apt install -y libsndfile1-dev
         pip install --upgrade pip
-        pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
         pip install git+https://github.com/huggingface/datasets
-        pip list
+        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]

     - name: Are GPUs recognized by our DL frameworks
       run: |
-        source .env/bin/activate
         python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
         python -c "import torch; print('Cuda version:', torch.version.cuda)"
         python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
         python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"

     - name: Run all tests on GPU
       env:
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_gpu tests
+        python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
       run: cat reports/tests_torch_gpu_failures_short.txt

     - name: Run examples tests on GPU
       if: ${{ always() }}
       env:
-        OMP_NUM_THREADS: 1
+        OMP_NUM_THREADS: 16
+        MKL_NUM_THREADS: 16
         RUN_SLOW: yes
+        HF_HOME: /mnt/cache
+        TRANSFORMERS_IS_CI: yes
       run: |
-        source .env/bin/activate
-        pip install -r examples/requirements.txt
-        python -m pytest -n 1 --dist=loadfile -s --make-reports=examples_torch_gpu examples
+        pip install -r examples/pytorch/_tests_requirements.txt
+        python -m pytest -n 1 --dist=loadfile --make-reports=examples_torch_gpu examples

     - name: Failure short reports
       if: ${{ always() }}
@@ -89,13 +69,9 @@ jobs:
     - name: Run all pipeline tests on GPU
       if: ${{ always() }}
       env:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
         RUN_PIPELINE_TESTS: yes
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_torch_pipeline_gpu tests
+        python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_torch_pipeline_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
@@ -108,60 +84,36 @@ jobs:
         name: run_all_tests_torch_gpu_test_reports
         path: reports

   run_all_tests_tf_gpu:
-    runs-on: [self-hosted, gpu, single-gpu]
+    runs-on: [self-hosted, docker-gpu, single-gpu]
+    container:
+      image: tensorflow/tensorflow:2.4.1-gpu
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-    - uses: actions/checkout@v2
+    - name: Launcher docker
+      uses: actions/checkout@v2

-    - name: Loading cache.
-      uses: actions/cache@v2
-      id: cache
-      with:
-        path: .env
-        key: v1.1-slow_tests_tf_gpu-${{ hashFiles('setup.py') }}

-    - name: Python version
+    - name: NVIDIA-SMI
       run: |
-        which python
-        python --version
-        pip --version

-    - name: Current dir
-      run: pwd
-    - run: nvidia-smi

-    - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-      if: steps.cache.outputs.cache-hit != 'true'
-      run: |
-        python -m venv .env
-        source .env/bin/activate
-        which python
-        python --version
-        pip --version
+        nvidia-smi

     - name: Install dependencies
       run: |
-        source .env/bin/activate
         pip install --upgrade pip
-        pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
         pip install git+https://github.com/huggingface/datasets
-        pip list
+        pip install .[sklearn,testing,onnx,sentencepiece]

     - name: Are GPUs recognized by our DL frameworks
       run: |
-        source .env/bin/activate
         TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
         TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

     - name: Run all tests on GPU
       env:
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
+        TF_NUM_INTEROP_THREADS: 1
+        TF_NUM_INTRAOP_THREADS: 16
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_tf_gpu tests
+        python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
       run: cat reports/tests_tf_gpu_failures_short.txt
@@ -169,17 +121,15 @@ jobs:
     - name: Run all pipeline tests on GPU
       if: ${{ always() }}
       env:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
         RUN_PIPELINE_TESTS: yes
+        TF_NUM_INTEROP_THREADS: 1
+        TF_NUM_INTRAOP_THREADS: 16
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_tf_pipelines_gpu tests
+        python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_tf_pipeline_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
-      run: cat reports/tests_tf_pipelines_gpu_failures_short.txt
+      run: cat reports/tests_tf_pipeline_gpu_failures_short.txt

     - name: Test suite reports artifacts
       if: ${{ always() }}
@@ -187,86 +137,49 @@ jobs:
       with:
         name: run_all_tests_tf_gpu_test_reports
         path: reports

   run_all_tests_torch_multi_gpu:
-    runs-on: [self-hosted, gpu, multi-gpu]
+    runs-on: [self-hosted, docker-gpu, multi-gpu]
+    container:
+      image: pytorch/pytorch:1.8.0-cuda11.1-cudnn8-runtime
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-    - uses: actions/checkout@v2
+    - name: Launcher docker
+      uses: actions/checkout@v2

-    - name: Loading cache.
-      uses: actions/cache@v2
-      id: cache
-      with:
-        path: .env
-        key: v1.1-slow_tests_torch_multi_gpu-${{ hashFiles('setup.py') }}

-    - name: Python version
+    - name: NVIDIA-SMI
       run: |
-        which python
-        python --version
-        pip --version

-    - name: Current dir
-      run: pwd
-    - run: nvidia-smi

-    - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-      if: steps.cache.outputs.cache-hit != 'true'
-      run: |
-        python -m venv .env
-        source .env/bin/activate
-        which python
-        python --version
-        pip --version
+        nvidia-smi

     - name: Install dependencies
       run: |
-        source .env/bin/activate
+        apt -y update && apt install -y libsndfile1-dev
         pip install --upgrade pip
-        pip install .[torch,sklearn,testing,onnxruntime,sentencepiece]
         pip install git+https://github.com/huggingface/datasets
-        pip list
+        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]

     - name: Are GPUs recognized by our DL frameworks
       run: |
-        source .env/bin/activate
         python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
         python -c "import torch; print('Cuda version:', torch.version.cuda)"
         python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
         python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"

-    - name: Run all tests on multi-GPU
+    - name: Run all tests on GPU
       env:
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
+        MKL_SERVICE_FORCE_INTEL: 1
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_multi_gpu tests
+        python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_multi_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
       run: cat reports/tests_torch_multi_gpu_failures_short.txt

-    - name: Run examples tests on multi-GPU
-      env:
-        OMP_NUM_THREADS: 1
-        RUN_SLOW: yes
-      run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_examples_multi_gpu examples
-
-    - name: Failure short reports
-      if: ${{ always() }}
-      run: cat reports/tests_torch_examples_multi_gpu_failures_short.txt

-    - name: Run all pipeline tests on multi-GPU
+    - name: Run all pipeline tests on GPU
       if: ${{ always() }}
       env:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
         RUN_PIPELINE_TESTS: yes
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_torch_pipeline_multi_gpu tests
+        python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_torch_pipeline_multi_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
@@ -280,73 +193,48 @@ jobs:
         path: reports

   run_all_tests_tf_multi_gpu:
-    runs-on: [self-hosted, gpu, multi-gpu]
+    runs-on: [self-hosted, docker-gpu, multi-gpu]
+    container:
+      image: tensorflow/tensorflow:2.4.1-gpu
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
-    - uses: actions/checkout@v2
+    - name: Launcher docker
+      uses: actions/checkout@v2

-    - name: Loading cache.
-      uses: actions/cache@v2
-      id: cache
-      with:
-        path: .env
-        key: v1.1-slow_tests_tf_multi_gpu-${{ hashFiles('setup.py') }}

-    - name: Python version
+    - name: NVIDIA-SMI
       run: |
-        which python
-        python --version
-        pip --version

-    - name: Current dir
-      run: pwd
-    - run: nvidia-smi

-    - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
-      if: steps.cache.outputs.cache-hit != 'true'
-      run: |
-        python -m venv .env
-        source .env/bin/activate
-        which python
-        python --version
-        pip --version
+        nvidia-smi

     - name: Install dependencies
       run: |
-        source .env/bin/activate
         pip install --upgrade pip
-        pip install .[tf,sklearn,testing,onnxruntime,sentencepiece]
         pip install git+https://github.com/huggingface/datasets
-        pip list
+        pip install .[sklearn,testing,onnx,sentencepiece]

     - name: Are GPUs recognized by our DL frameworks
       run: |
-        source .env/bin/activate
         TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
         TF_CPP_MIN_LOG_LEVEL=3 python -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"

-    - name: Run all tests on multi-GPU
+    - name: Run all tests on GPU
       env:
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
+        TF_NUM_INTEROP_THREADS: 1
+        TF_NUM_INTRAOP_THREADS: 16
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_tf_multi_gpu tests
+        python -m pytest -n 1 --dist=loadfile --make-reports=tests_tf_multi_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
       run: cat reports/tests_tf_multi_gpu_failures_short.txt

-    - name: Run all pipeline tests on multi-GPU
+    - name: Run all pipeline tests on GPU
       if: ${{ always() }}
       env:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
         RUN_PIPELINE_TESTS: yes
+        TF_NUM_INTEROP_THREADS: 1
+        TF_NUM_INTRAOP_THREADS: 16
       run: |
-        source .env/bin/activate
-        python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_tf_pipeline_multi_gpu tests
+        python -m pytest -n 1 --dist=loadfile -m is_pipeline_test --make-reports=tests_tf_pipeline_multi_gpu tests

     - name: Failure short reports
       if: ${{ always() }}
       run: cat reports/tests_tf_pipeline_multi_gpu_failures_short.txt
@@ -357,4 +245,112 @@ jobs:
       with:
         name: run_all_tests_tf_multi_gpu_test_reports
         path: reports

+  run_all_tests_torch_cuda_extensions_gpu:
+    runs-on: [self-hosted, docker-gpu, single-gpu]
+    container:
+      image: nvcr.io/nvidia/pytorch:21.03-py3
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+    steps:
+    - name: Launcher docker
+      uses: actions/checkout@v2
+
+    - name: NVIDIA-SMI
+      run: |
+        nvidia-smi
+
+    - name: Install dependencies
+      run: |
+        apt -y update && apt install -y libaio-dev
+        pip install --upgrade pip
+        pip install .[testing,deepspeed]
+
+    - name: Are GPUs recognized by our DL frameworks
+      run: |
+        python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
+        python -c "import torch; print('Cuda version:', torch.version.cuda)"
+        python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
+        python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
+
+    - name: Run all tests on GPU
+      run: |
+        python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_gpu tests/deepspeed tests/extended
+
+    - name: Failure short reports
+      if: ${{ always() }}
+      run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt
+
+    - name: Test suite reports artifacts
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: run_tests_torch_cuda_extensions_gpu_test_reports
+        path: reports
+
+  run_all_tests_torch_cuda_extensions_multi_gpu:
+    runs-on: [self-hosted, docker-gpu, multi-gpu]
+    container:
+      image: nvcr.io/nvidia/pytorch:21.03-py3
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+    steps:
+    - name: Launcher docker
+      uses: actions/checkout@v2
+
+    - name: NVIDIA-SMI
+      run: |
+        nvidia-smi
+
+    - name: Install dependencies
+      run: |
+        apt -y update && apt install -y libaio-dev
+        pip install --upgrade pip
+        pip install .[testing,deepspeed,fairscale]
+
+    - name: Are GPUs recognized by our DL frameworks
+      run: |
+        python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
+        python -c "import torch; print('Cuda version:', torch.version.cuda)"
+        python -c "import torch; print('CuDNN version:', torch.backends.cudnn.version())"
+        python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
+
+    - name: Run all tests on GPU
+      run: |
+        python -m pytest -n 1 --dist=loadfile --make-reports=tests_torch_cuda_extensions_multi_gpu tests/deepspeed tests/extended
+
+    - name: Failure short reports
+      if: ${{ always() }}
+      run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt
+
+    - name: Test suite reports artifacts
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
+        path: reports
+
+  send_results:
+    name: Send results to webhook
+    runs-on: ubuntu-latest
+    if: always()
+    needs: [
+      run_all_tests_torch_gpu,
+      run_all_tests_tf_gpu,
+      run_all_tests_torch_multi_gpu,
+      run_all_tests_tf_multi_gpu,
+      run_all_tests_torch_cuda_extensions_gpu,
+      run_all_tests_torch_cuda_extensions_multi_gpu
+    ]
+    steps:
+    - uses: actions/checkout@v2
+
+    - uses: actions/download-artifact@v2
+
+    - name: Send message to Slack
+      env:
+        CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
+        CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
+      run: |
+        pip install slack_sdk
+        python utils/notification_service.py scheduled
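A note on `TF_FORCE_GPU_ALLOW_GROWTH: "true"` in the pipeline-test steps above: it stops TensorFlow from reserving all GPU memory up front, which matters when PyTorch and TensorFlow tests share the same GPU in one run. A minimal in-code equivalent (illustrative only; the CI itself relies on the environment variable) is:

```python
# Illustrative equivalent of TF_FORCE_GPU_ALLOW_GROWTH=true, set from code
import tensorflow as tf

for gpu in tf.config.list_physical_devices("GPU"):
    # Allocate GPU memory incrementally instead of grabbing it all at start-up
    tf.config.experimental.set_memory_growth(gpu, True)
```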
27  .github/workflows/stale.yml  vendored  Normal file
@@ -0,0 +1,27 @@
name: Stale Bot

on:
  schedule:
    - cron: "0 15 * * *"

jobs:
  close_stale_issues:
    name: Close Stale Issues
    if: github.repository == 'huggingface/transformers'
    runs-on: ubuntu-latest
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
    - uses: actions/checkout@v2

    - name: Setup Python
      uses: actions/setup-python@v1
      with:
        python-version: 3.7

    - name: Install requirements
      run: |
        pip install PyGithub
    - name: Close stale issues
      run: |
        python scripts/stale.py
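`scripts/stale.py` does the actual work here. A minimal sketch of such a script with PyGithub might look like the following; the 30-day cutoff and the exempt label are assumptions for illustration, not the real script's policy:

```python
# Minimal illustrative stale-issue closer using PyGithub; thresholds and
# label names are assumed, not copied from scripts/stale.py.
import os
from datetime import datetime, timedelta

from github import Github

g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/transformers")
cutoff = datetime.utcnow() - timedelta(days=30)  # assumed staleness window

for issue in repo.get_issues(state="open"):
    labels = {label.name for label in issue.labels}
    if issue.updated_at < cutoff and "wontfix-not-stale" not in labels:
        issue.create_comment("This issue has been automatically marked as stale.")
        issue.edit(state="closed")
```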
6  .gitignore  vendored
@@ -9,8 +9,7 @@ __pycache__/
 *.so

 # tests and logs
-tests/fixtures/*
-!tests/fixtures/sample_text_no_unicode.txt
+tests/fixtures/cached_*_text.txt
 logs/
 lightning_logs/
 lang_code_data/
@@ -159,3 +158,6 @@ tags

 # pre-commit
 .pre-commit*
+
+# .lock
+*.lock
CONTRIBUTING.md

@@ -1,3 +1,19 @@
+<!---
+Copyright 2020 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
 # How to contribute to transformers?

 Everyone is welcome to contribute, and we value everybody's contribution. Code
@@ -20,6 +36,13 @@ There are 4 ways you can contribute to transformers:
 * Contributing to the examples or to the documentation;
 * Submitting issues related to bugs or desired new features.

+In particular there is a special [Good First
+Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of
+open Issues that anyone is welcome to work on. Just comment in the issue that you'd like to work
+on it. In that same listing you will also find some Issues with the `Good Second Issue` label. These are
+typically slightly more complicated than the Issues with just the `Good First Issue` label. But if you
+feel you know what you're doing, go for it.
+
 *All are equally valuable to the community.*

 ## Submitting a new issue or feature request
@@ -30,7 +53,7 @@ feedback.

 ### Did you find a bug?

-The transformers are robust and reliable thanks to the users who notify us of
+The 🤗 Transformers library is robust and reliable thanks to the users who notify us of
 the problems they encounter. So thank you for reporting an issue.

 First, we would really appreciate it if you could **make sure the bug was not
@@ -125,7 +148,7 @@ Follow these steps to start contributing:
    $ git checkout -b a-descriptive-name-for-my-changes
    ```

-   **do not** work on the `master` branch.
+   **Do not** work on the `master` branch.

 4. Set up a development environment by running the following command in a virtual environment:

@@ -269,7 +292,7 @@ $ python -m pytest -n auto --dist=loadfile -s -v ./tests/
 and for the examples:

 ```bash
-$ pip install -r examples/requirements.txt  # only needed the first time
+$ pip install -r examples/xxx/requirements.txt  # only needed the first time
 $ python -m pytest -n auto --dist=loadfile -s -v ./examples/
 ```
 In fact, that's how `make test` and `make test-examples` are implemented (sans the `pip install` line)!
@@ -312,8 +335,28 @@ for more information.

 ### Develop on Windows

 On Windows, you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings:

 `git config core.autocrlf input`

+One way to run the `make` command on Windows is via MSYS2:
+
+1. [Download MSYS2](https://www.msys2.org/); we assume it is installed in C:\msys64
+2. Open the command line C:\msys64\msys2.exe (it should be available from the start menu)
+3. Run in the shell: `pacman -Syu` and install make with `pacman -S make`
+4. Add `C:\msys64\usr\bin` to your PATH environment variable.
+
+You can now use `make` from any terminal (Powershell, cmd.exe, etc.) 🎉
+
+### Syncing forked master with upstream (HuggingFace) master
+
+To avoid pinging the upstream repository, which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs,
+when syncing the master branch of a forked repository, please follow these steps:
+1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead merge directly into the forked master.
+2. If a PR is absolutely necessary, use the following steps after checking out your branch:
+```
+$ git checkout -b your-branch-for-syncing
+$ git pull --squash --no-commit upstream master
+$ git commit -m '<your message without GitHub references>'
+$ git push --set-upstream origin your-branch-for-syncing
+```
277  ISSUES.md  Normal file
@@ -0,0 +1,277 @@
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# How To Request Support

This is an Open Source Project, so please be mindful that, like in any other project of this kind, there is no obligation to answer all requests for help.

However, we want to encourage you to ask for help whenever you think it's needed! We are happy about every question we get, because it allows us to better understand your needs and possible misunderstandings, and most importantly it gives you a way to help us make this library better. That being said, this document's main purpose is to provide guidelines on how you can formulate your requests to increase your chances of being understood and of getting support.

There are two main venues to receive support: [the forums](https://discuss.huggingface.co/) and [the GitHub issues](https://github.com/huggingface/transformers/issues).

## The Forums

[The user forums](https://discuss.huggingface.co/) are supported by the wide community of the library users and backed up by developers when needed.

If you have difficulty deploying this library, have questions, or would like to discuss a new feature, please first consider discussing those things on the forums. Only when you feel your subject matter has been crystallized and you still need support from the library developers should you proceed to file an [issue](https://github.com/huggingface/transformers/issues).

In particular, all "Please explain" questions or objectively very user-specific feature requests belong on the forums. Here are some examples of such questions:

* "I would like to use a BertModel within a RL-Agent for a customer support service. How can I use a BertForMaskedLM in my ChatBotModel?"

* "Could you please explain why T5 has no positional embedding matrix under T5Model?"

* "How should I set my generation parameters for translation?"

* "How to train T5 on De->En translation?"


## The GitHub Issues

Everything which hints at a bug should be opened as an [issue](https://github.com/huggingface/transformers/issues).

You are not required to read the following guidelines before opening an issue. However, if you notice that your issue doesn't get any replies, chances are that the developers have one or several difficulties with its quality. In this case, reading the following points and adjusting your issue accordingly could help.

1. Before posting an issue, first search for already posted issues, since chances are someone has already asked a similar question before you.

If you use Google, your search query should be:

```
"huggingface" "transformers" your query
```

The first two quoted words tell Google to limit the search to the context of Huggingface Transformers. The remainder is your query - most commonly this would be the error message the software fails with. We will go deeper into details shortly.

The results of such a query will typically match GitHub issues, Hugging Face forums, StackExchange, and blogs.

If you find relevant hints, you may choose to continue the discussion there if you have follow-up questions.

If what you found is similar but doesn't quite answer your problem, please post a new issue and do include links to similar issues or forum discussions you may have found.

Let's look at some examples:

The error message, often referred to as an assertion, tells us what went wrong. Here is an example of an assertion:

```python
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "/transformers/src/transformers/__init__.py", line 34, in <module>
    from . import dependency_versions_check
  File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module>
    from .file_utils import is_tokenizers_available
  File "/transformers/src/transformers/file_utils.py", line 40, in <module>
    from tqdm.auto import tqdm
ModuleNotFoundError: No module named 'tqdm.auto'
```

and it typically includes a traceback, so that we can see the full stack of calls the program made before it failed. This gives us the context to know why the program failed.

Going back to the above example: if you received this error, look at the very last line of the error, which is:

```python
ModuleNotFoundError: No module named 'tqdm.auto'
```

And now we can use it to do the searching on your favorite search engine:

1. first for `"huggingface" "transformers" "ModuleNotFoundError: No module named 'tqdm.auto'"`
2. if you don't find relevant results, then search for just `"ModuleNotFoundError: No module named 'tqdm.auto'"`
3. and finally, if nothing still comes up, then remove the outside quotes: `ModuleNotFoundError: No module named 'tqdm.auto'`

If the error includes any messages that include bits unique to your filesystem, always remove those in the search query since other users will not have the same filesystem as yours. For example:

```bash
python -c 'open("/tmp/wrong_path.txt", "r")'
Traceback (most recent call last):
  File "<string>", line 1, in <module>
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/wrong_path.txt'
```
Here you'd search for just: `"FileNotFoundError: [Errno 2] No such file or directory"`

If the local information that you removed was inside the error message, you may also need to remove the double quotes, since your query is no longer exact. So if the error message was something like:

```bash
ValueError: '/tmp/wrong_path.txt' cannot be found
```

then you'd search for `"ValueError" "cannot be found"`

As you search, you will notice that when you don't use quotes, the search engines will often return a variety of unrelated hits, which may or may not be what you want.

Experiment with different ways and find which approach gives the most satisfactory results.

2. Keep the issue short, providing the information that you think will aid the developers to understand your situation. Put yourself in the shoes of the person who has never seen your code or knows anything about your custom setup. This mental exercise will help to develop an intuition for what to share and what not to.

3. If there is a software failure, always provide the full traceback, for example:

```python
$ python -c 'import transformers'
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "/transformers/src/transformers/__init__.py", line 34, in <module>
    from . import dependency_versions_check
  File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module>
    from .file_utils import is_tokenizers_available
  File "/transformers/src/transformers/file_utils.py", line 40, in <module>
    from tqdm.auto import tqdm
ModuleNotFoundError: No module named 'tqdm.auto'
```

As compared to providing just the last line of the error message, e.g.:
```python
ModuleNotFoundError: No module named 'tqdm.auto'
```
which is not sufficient.

If your application is running on more than one GPU (e.g. under `DistributedDataParallel`) and is typically printing every log and traceback multiple times, please make sure that you paste only one copy of it. At times the traceback from parallel processes may get interleaved - so either disentangle these or change the loggers to log only for `local_rank==0` so that only one process logs things.
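For example, one minimal way to apply the `local_rank==0` idea (a sketch; it assumes the `LOCAL_RANK` environment variable that `torch.distributed.launch` sets, so adapt it to your launcher):

```python
# Only let the main process (local_rank 0) emit INFO logs; other ranks stay quiet
import logging
import os

local_rank = int(os.environ.get("LOCAL_RANK", "0"))
logging.basicConfig(level=logging.INFO if local_rank == 0 else logging.ERROR)
```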
4. When quoting a traceback, command line instructions, or any type of code, always enclose it in triple backticks inside the editor window, that is:

````
```
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```
````

If it's a command line with a long argument list, please consider breaking it down using backslashes and new lines. Here is an example of a good command line quote:

```bash
cd examples/seq2seq
python -m torch.distributed.launch --nproc_per_node=2 ./finetune_trainer.py \
--model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \
--output_dir output_dir --overwrite_output_dir \
--do_train --n_train 500 --num_train_epochs 1 \
--per_device_train_batch_size 1 --freeze_embeds \
--src_lang en_XX --tgt_lang ro_RO --task translation \
--fp16 --sharded_ddp
```

If you don't break it up, one has to scroll horizontally, which often makes it quite difficult to quickly see what's happening.

The backslashes allow us to copy the command directly into the console to run it, without needing to edit it.

5. Include only the important information that you think will help the developer to quickly identify the problem.

For example, applications often create huge amounts of logs. Ask yourself whether providing all or parts of the log is useful.

Pasting 100-1000 lines of log into the issue is an immediate turn-off, since it will take a lot of time to figure out where the pertinent parts of the log are.

Attaching a full log can be helpful if it's done as an attachment, or if it's enclosed in the following HTML code in the comment editor window:

```
<details>
<summary>Full log</summary>
<pre>

many
lines
go
here

</pre>
</details>
```

which would result in the following entry, which can be opened if desired, but otherwise takes little space.

<details>
<summary>Full log</summary>
<pre>
many
lines
go
here
</pre>
</details>

You could also provide a link to a pastebin service, but this is less beneficial since those links tend to expire quickly and future readers of your issue might not be able to access that log file anymore and may lack some context.

6. If this is an issue in your code, do try to reduce that code to a minimal example that still demonstrates the problem. Please ask on the forums if you have a hard time figuring out how to do that. Please realize that we don't have the luxury of having time to try and understand all of your custom code.

If you really tried to make a short reproducible code but couldn't figure it out, it might be that having a traceback will give the developer enough information to know what's going on. But if it is not enough and we can't reproduce the problem, we can't really solve it.

Do not despair if you can't figure it out from the beginning, just share what you can and perhaps someone else will be able to help you on the forums.

If your setup involves any custom datasets, the best way to help us reproduce the problem is to create a [Google Colab notebook](https://colab.research.google.com/) that demonstrates the issue and, once you verify that the issue still exists, include a link to that notebook in the Issue. Just make sure that you don't copy and paste the location bar url of the open notebook - as this is private and we won't be able to open it. Instead, you need to click on `Share` in the right upper corner of the notebook, select `Get Link` and then copy and paste the public link it will give to you.

7. If you forked off some of this project's code or example applications, please do not ask us to go into your code repository and figure out what you may have done. The code is already very complex and, unless there is an easy way to do a diff and it's a small diff, it won't be possible to find someone with time on their hands to make a lengthy investigation. That said, you might find someone on the forums who will be generous enough to do this for you.

8. Before reporting an issue, first, always try to update your environment to the latest official version of this library. We have no resources to go and debug older revisions, which could easily have bugs that have been fixed in the latest released version.

We understand that this is not always possible, especially when APIs change, in which case file an issue against the highest library version your environment can support.

Of course, if you upgrade the library, always retest that the problem is still there.

9. Please do not ask us to reproduce an issue with your custom data, since we don't have it. So, either you should use some existing dataset supported by HF datasets, or you need to supply code that generates a small sample on the fly, or some other quick and simple way to get it.

Please do not send us any non-public domain data that may require a license or a permission to be used.
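For example, a tiny stand-in dataset can be built on the fly with the `datasets` library (a sketch; shape the columns to resemble your real data):

```python
# Build a small synthetic sample instead of attaching private data
from datasets import Dataset

data = {"text": ["a first example", "a second example"] * 4, "label": [0, 1] * 4}
ds = Dataset.from_dict(data)
print(ds)
```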
10. Do not tag multiple developers on the issue unless you know this is expected, either because you asked them and they gave you an explicit permission to tag them, or the issue template instructs you to do so.

The "who to tag for what domain" part of the issue template is there to help users direct their questions to the right developers who are designated maintainers of project's specific domains. They can then decide at their own discretion to tag other developers if they feel it'd help move the issue forward.

We currently don't have a triage service and we trust your capacity to identify the right domain and thus the persons to tag in your issue. If you are not sure, please use the forums to ask for guidance.

When in doubt, err on the side of not tagging a given person. If you tag multiple people out of context or permission, don't be surprised if you get no response at all. Please remember that every time you tag someone, they get a notification and you're taking their time without their permission. Please be sensitive to that.

If you got helped by one of the developers in the past, please don't tag them in future issues, unless they are listed in the issue template for the domain you are asking about or that developer gave you an explicit permission to tag them in future issues.

If you see a certain developer doing multiple and/or recent commits into a specific area of the project that you feel is relevant to your issue, it is not a good reason to tag them. Various developers may be fixing things that prevent them from moving forward, but often their work is focused on a totally different domain. And while they may or may not know how to help you with the problem at hand, it would benefit the whole community much more if they focus on the domain of their unique expertise.

11. Use the Edit button. Take your time, and re-read and improve the wording and formatting to make your posts and comments as easy to understand as possible.

Avoid posting multiple comments in a row, as each comment generates a notification for the developers tagged in that issue. If you happened to post multiple comments in a row, and nobody followed up yet - consider merging those into one or a few comments while editing the combined content to be coherent.

If you choose to edit your older comments after others posted follow-up comments, you need to be aware that your modifications might not be noticed, so if it's not a typo fix, try to write a new comment flagging that something has been changed in the previous comments.

For example, the very first comment is the most important one. If, while the thread unfolds, you realize that things aren't as they seemed to you originally, you may want to edit the first post to reflect the up-to-date understanding of the issue at hand, so that it helps those who read your issue in the future quickly understand what's going on and not need to sift through dozens of comments. It also helps to indicate that the post was edited, so those reading the thread later can understand why there might be a certain discontinuity in the information flow.

Use bullets and items if you have lists of items, and the outcome improves overall readability.

Use backticks to refer to class and function names, e.g. `BartModel` and `generate`, as these stand out and improve the speed of a reader's comprehension.

Try not to use italics and bold text too much, as these often make the text more difficult to read.


12. If you are cross-referencing a specific comment in a given thread or another issue, always link to that specific comment, rather than using the issue link. If you do the latter, it can be nearly impossible to find which specific comment you're referring to.

To get the link to the specific comment, do not copy the url from the location bar of your browser; instead, click the `...` icon in the upper right corner of the comment and then select "Copy Link".

For example, the first link is a link to an issue, and the second to a specific comment in the same issue:

1. https://github.com/huggingface/transformers/issues/9257
2. https://github.com/huggingface/transformers/issues/9257#issuecomment-749945162


13. If you are replying to the last comment, it's totally fine to make your reply with just your comment in it. The readers can follow the information flow here.

But if you're replying to a comment that happened some comments back, it's always good practice to quote just the relevant lines you're replying to. The `>` is used for quoting, or you can always use the menu to do so. For example, your editor box will look like:

```
> How big is your gpu cluster?

Our cluster is made of 256 gpus.
```

If you are addressing multiple comments, quote the relevant parts of each before your answer. Some people use the same comment to do multiple replies, others separate them into separate comments. Either way works. The latter approach helps for linking to a specific comment.

In general, the best way to figure out what works best is to learn from issues posted by other people - see which issues get great responses and which get little to no response - and observe what the posters who received great responses did differently from those who did not.

Thank you for reading this somewhat lengthy document. We would like to conclude that these are not absolute rules, but friendly advice that will help maximize the chances for us to understand what you are trying to communicate, reproduce the problem, and then resolve it to your satisfaction and the benefit of the whole community.

If after reading this document there are remaining questions on how and why, or there is a need for further elucidation, please don't hesitate to ask your question in [this thread](https://discuss.huggingface.co/t/how-to-request-support/3128).
1  LICENSE
@@ -1,3 +1,4 @@
+Copyright 2018- The Hugging Face team. All rights reserved.

                                  Apache License
                            Version 2.0, January 2004
54  Makefile
@@ -1,5 +1,7 @@
-.PHONY: modified_only_fixup extra_quality_checks quality style fixup fix-copies test test-examples docs
+.PHONY: deps_table_update modified_only_fixup extra_quality_checks quality style fixup fix-copies test test-examples docs
+
+# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
+export PYTHONPATH = src

 check_dirs := examples tests src utils

@@ -14,37 +16,55 @@ modified_only_fixup:
 		echo "No library .py files were modified"; \
 	fi

+# Update src/transformers/dependency_versions_table.py
+
+deps_table_update:
+	@python setup.py deps_table_update
+
+# autogenerating code
+
+autogenerate_code: deps_table_update
+	python utils/class_mapping_update.py
+
 # Check that source code meets quality standards

 extra_quality_checks:
 	python utils/check_copies.py
+	python utils/check_table.py
 	python utils/check_dummies.py
 	python utils/check_repo.py
 	python utils/style_doc.py src/transformers docs/source --max_len 119
+	python utils/check_inits.py

 # this target runs checks on all files
 quality:
 	black --check $(check_dirs)
 	isort --check-only $(check_dirs)
+	python utils/custom_init_isort.py --check_only
 	flake8 $(check_dirs)
 	python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only
 	${MAKE} extra_quality_checks

 # Format source code automatically and check if there are any problems left that need manual fixing

+extra_style_checks:
+	python utils/custom_init_isort.py
+	python utils/style_doc.py src/transformers docs/source --max_len 119
+
 # this target runs checks on all files and potentially modifies some of them
 style:
 	black $(check_dirs)
 	isort $(check_dirs)
-	python utils/style_doc.py src/transformers docs/source --max_len 119
+	${MAKE} autogenerate_code
+	${MAKE} extra_style_checks

 # Super fast fix and check target that only works on relevant modified files since the branch was made

-fixup: modified_only_fixup extra_quality_checks
+fixup: modified_only_fixup extra_style_checks autogenerate_code extra_quality_checks

 # Make marked copies of snippets of codes conform to the original

 fix-copies:
 	python utils/check_copies.py --fix_and_overwrite
+	python utils/check_table.py --fix_and_overwrite
 	python utils/check_dummies.py --fix_and_overwrite

 # Run tests for the library
@@ -55,9 +75,29 @@ test:
 # Run tests for examples

 test-examples:
-	python -m pytest -n auto --dist=loadfile -s -v ./examples/
+	python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/

+# Run tests for SageMaker DLC release
+
+test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker]
+	TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker
+
+
 # Check that docs can build

 docs:
-	cd docs && make html SPHINXOPTS="-W"
+	cd docs && make html SPHINXOPTS="-W -j 4"

+# Release stuff
+
+pre-release:
+	python utils/release.py
+
+pre-patch:
+	python utils/release.py --patch
+
+post-release:
+	python utils/release.py --post_release
+
+post-patch:
+	python utils/release.py --post_release --patch
143  README.md
@@ -1,3 +1,19 @@
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<p align="center">
    <br>
    <img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/transformers_logo_name.png" width="400"/>
@@ -22,27 +38,24 @@
</p>

<h3 align="center">
<p>State-of-the-art Natural Language Processing for PyTorch and TensorFlow 2.0
<p>State-of-the-art Natural Language Processing for Jax, PyTorch and TensorFlow
</h3>

🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation, etc in 100+ languages. Its aim is to make cutting-edge NLP easier to use for everyone.
🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone.

🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture can be used as a standalone and modified to enable quick research experiments.
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments.

🤗 Transformers is backed by the two most popular deep learning libraries, [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/), with a seamless integration between them, allowing you to train your models with one then load it for inference with the other.

### Recent contributors
[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/0)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/0)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/1)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/1)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/2)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/2)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/3)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/3)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/4)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/4)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/5)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/5)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/6)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/6)[![](https://sourcerer.io/fame/clmnt/huggingface/transformers/images/7)](https://sourcerer.io/fame/clmnt/huggingface/transformers/links/7)
🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.
## Online demos

You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer an [inference API](https://huggingface.co/pricing) to use those models.
You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models.

Here are a few examples:
- [Masked word completion with BERT](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
- [Name Entity Recognition with Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
- [Text generation with GPT-2](https://huggingface.co/gpt2?text=A+long+time+ago%2C+)
- [Natural Langugage Inference with RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [Natural Language Inference with RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [Summarization with BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
- [Question answering with DistilBERT](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
- [Translation with T5](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)
@@ -51,20 +64,20 @@ Here are a few examples:

## Quick tour

To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model training. Here is how to quickly use a pipeline to classify positive versus negative texts
To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. Here is how to quickly use a pipeline to classify positive versus negative texts:

```python
>>> from transformers import pipeline

# Allocate a pipeline for sentiment-analysis
>>> classifier = pipeline('sentiment-analysis')
>>> classifier('We are very happy to include pipeline into the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9978193640708923}]
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
```
The second line of code downloads and caches the pretrained model used by the pipeline, the third line evaluates it on the given text. Here the answer is "positive" with a confidence of 99.8%.
The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here the answer is "positive" with a confidence of 99.97%.

This is another example of pipeline used for that can extract question answers from some context:
Many NLP tasks have a pre-trained `pipeline` ready to go. For example, we can easily extract question answers given context:

``` python
>>> from transformers import pipeline

@@ -73,15 +86,15 @@ This is another example of pipeline used for that can extract question answers f
>>> question_answerer = pipeline('question-answering')
>>> question_answerer({
...     'question': 'What is the name of the repository ?',
...     'context': 'Pipeline have been included in the huggingface/transformers repository'
...     'context': 'Pipeline has been included in the huggingface/transformers repository'
... })
{'score': 0.5135612454720828, 'start': 35, 'end': 59, 'answer': 'huggingface/transformers'}
{'score': 0.30970096588134766, 'start': 34, 'end': 58, 'answer': 'huggingface/transformers'}

```

On top of the answer, the pretrained model used here returned its confidence score, along with the start position and its end position in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html).
In addition to the answer, the pretrained model used here returned its confidence score, along with the start position and end position of the answer in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html).
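As a quick illustrative sketch (these two lines are not part of the diff; the task names are standard pipeline identifiers in this version of the library), other tasks come with ready-made pipelines as well:

```python
>>> from transformers import pipeline

# Each task name allocates a pretrained model plus its preprocessing
>>> summarizer = pipeline('summarization')
>>> translator = pipeline('translation_en_to_de')
```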

To download and use any of the pretrained models on your given task, you just need to use those three lines of codes (PyTorch version):
To download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version:
```python
>>> from transformers import AutoTokenizer, AutoModel

@@ -91,7 +104,7 @@ To download and use any of the pretrained models on your given task, you just ne
>>> inputs = tokenizer("Hello world!", return_tensors="pt")
>>> outputs = model(**inputs)
```
or for TensorFlow:
And here is the equivalent code for TensorFlow:
```python
>>> from transformers import AutoTokenizer, TFAutoModel

@@ -102,9 +115,9 @@ or for TensorFlow:
>>> outputs = model(**inputs)
```

The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on one (or list) of texts (as we can see on the fourth line of both code examples). It will output a dictionary you can directly pass to your model (which is done on the fifth line).
The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on a single string (as in the above examples) or a list. It will output a dictionary that you can use in downstream code or simply directly pass to your model using the ** argument unpacking operator.
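As a minimal sketch of the list case (using `bert-base-uncased` as an example checkpoint; `padding=True` is an assumption here to make the batch rectangular):

```python
>>> from transformers import AutoTokenizer, AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = AutoModel.from_pretrained("bert-base-uncased")

# A list of strings yields a dictionary of padded tensors
>>> batch = tokenizer(["Hello world!", "Hello transformers!"], padding=True, return_tensors="pt")
>>> outputs = model(**batch)  # ** unpacks the dictionary into keyword arguments
```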
The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. For instance, [this tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model in classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune the on a new dataset.
The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. [This tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset.
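For a rough sense of what the `Trainer` API looks like, here is a minimal toy sketch (the dataset, checkpoint and hyperparameters are illustrative assumptions, not taken from the README):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

# A toy dataset of two labeled sentences, tokenized up front
encodings = tokenizer(["I love this!", "This is terrible."], padding=True, truncation=True)
labels = [1, 0]

class ToyDataset(torch.utils.data.Dataset):
    def __len__(self):
        return len(labels)

    def __getitem__(self, idx):
        item = {key: torch.tensor(values[idx]) for key, values in encodings.items()}
        item["labels"] = torch.tensor(labels[idx])
        return item

training_args = TrainingArguments(output_dir="./results", num_train_epochs=1, per_device_train_batch_size=2)
trainer = Trainer(model=model, args=training_args, train_dataset=ToyDataset())
trainer.train()
```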
## Why should I use transformers?

@@ -122,50 +135,78 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta
1. Choose the right framework for every part of a model's lifetime:
    - Train state-of-the-art models in 3 lines of code.
    - Move a single model between TF2.0/PyTorch frameworks at will.
    - Seamlessly pick the right framework for training, evaluation, production.
    - Seamlessly pick the right framework for training, evaluation and production.

1. Easily customize a model or an example to your needs:
    - Examples for each architecture to reproduce the results by the official authors of said architecture.
    - Expose the models internal as consistently as possible.
    - We provide examples for each architecture to reproduce the results published by its original authors.
    - Model internals are exposed as consistently as possible.
    - Model files can be used independently of the library for quick experiments.

## Why shouldn't I use transformers?

- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving in additional abstractions/files.
- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
- The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library.
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.

## Installation
This repository is tested on Python 3.6+, PyTorch 1.0.0+ (PyTorch 1.3.1+ for [examples](https://github.com/huggingface/transformers/tree/master/examples)) and TensorFlow 2.0.
### With pip

This repository is tested on Python 3.6+, Flax 0.3.2+, PyTorch 1.3.1+ and TensorFlow 2.3+.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

First, create a virtual environment with the version of Python you're going to use and activate it.

Then, you will need to install one of, or both, TensorFlow 2.0 and PyTorch.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available) and/or [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform.
Then, you will need to install at least one of Flax, PyTorch or TensorFlow.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax installation page](https://github.com/google/flax#quick-install) regarding the specific install command for your platform.

When TensorFlow 2.0 and/or PyTorch has been installed, 🤗 Transformers can be installed using pip as follows:
When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:

```bash
pip install transformers
```

If you'd like to play with the examples, you must [install the library from source](https://huggingface.co/transformers/installation.html#installing-from-source).
If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/transformers/installation.html#installing-from-source).
## Models architectures
### With conda

Since Transformers version v4.0.0, we now have a conda channel: `huggingface`.

🤗 Transformers can be installed using conda as follows:

```shell script
conda install -c huggingface transformers
```

Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda.

## Model architectures
**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).

Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen)

🤗 Transformers currently provides the following architectures (see [here](https://huggingface.co/transformers/model_summary.html) for a high-level summary of each of them):

1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
1. **[BART](https://huggingface.co/transformers/model_doc/bart.html)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
1. **[BARThez](https://huggingface.co/transformers/model_doc/barthez.html)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
1. **[BERT](https://huggingface.co/transformers/model_doc/bert.html)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
1. **[BERT For Sequence Generation](https://huggingface.co/transformers/model_doc/bertgeneration.html)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[BigBird-RoBERTa](https://huggingface.co/transformers/model_doc/bigbird.html)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[BigBird-Pegasus](https://huggingface.co/transformers/model_doc/bigbird_pegasus.html)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
1. **[Blenderbot](https://huggingface.co/transformers/model_doc/blenderbot.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BlenderbotSmall](https://huggingface.co/transformers/model_doc/blenderbot_small.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
1. **[BORT](https://huggingface.co/transformers/model_doc/bort.html)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
1. **[CamemBERT](https://huggingface.co/transformers/model_doc/camembert.html)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
1. **[CLIP](https://huggingface.co/transformers/model_doc/clip.html)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[ConvBERT](https://huggingface.co/transformers/model_doc/convbert.html)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[CPM](https://huggingface.co/transformers/model_doc/cpm.html)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
1. **[CTRL](https://huggingface.co/transformers/model_doc/ctrl.html)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
1. **[DeBERTa](https://huggingface.co/transformers/model_doc/deberta.html)** (from Microsoft Research) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[DeBERTa](https://huggingface.co/transformers/model_doc/deberta.html)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[DeBERTa-v2](https://huggingface.co/transformers/model_doc/deberta_v2.html)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
1. **[DeiT](https://huggingface.co/transformers/model_doc/deit.html)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
1. **[DialoGPT](https://huggingface.co/transformers/model_doc/dialogpt.html)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.
1. **[DPR](https://huggingface.co/transformers/model_doc/dpr.html)** (from Facebook) released with the paper [Dense Passage Retrieval
@@ -176,28 +217,42 @@ Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
1. **[Funnel Transformer](https://huggingface.co/transformers/model_doc/funnel.html)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
1. **[GPT](https://huggingface.co/transformers/model_doc/gpt.html)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT-2](https://huggingface.co/transformers/model_doc/gpt2.html)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
1. **[GPT Neo](https://huggingface.co/transformers/model_doc/gpt_neo.html)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[I-BERT](https://huggingface.co/transformers/model_doc/ibert.html)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer
1. **[LayoutLM](https://huggingface.co/transformers/model_doc/layoutlm.html)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
1. **[LED](https://huggingface.co/transformers/model_doc/led.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[Longformer](https://huggingface.co/transformers/model_doc/longformer.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LUKE](https://huggingface.co/transformers/model_doc/luke.html)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[LXMERT](https://huggingface.co/transformers/model_doc/lxmert.html)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
1. **[M2M100](https://huggingface.co/transformers/model_doc/m2m_100.html)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/transformers/model_doc/marian.html)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MBart](https://huggingface.co/transformers/model_doc/mbart.html)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[MBart-50](https://huggingface.co/transformers/model_doc/mbart.html)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[Megatron-BERT](https://huggingface.co/transformers/model_doc/megatron_bert.html)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/transformers/model_doc/megatron_gpt2.html)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MPNet](https://huggingface.co/transformers/model_doc/mpnet.html)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MT5](https://huggingface.co/transformers/model_doc/mt5.html)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[Pegasus](https://huggingface.co/transformers/model_doc/pegasus.html)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[ProphetNet](https://huggingface.co/transformers/model_doc/prophetnet.html)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[Reformer](https://huggingface.co/transformers/model_doc/reformer.html)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RoBERTa](https://huggingface.co/transformers/model_doc/roberta.html)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[SpeechToTextTransformer](https://huggingface.co/transformers/model_doc/speech_to_text.html)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SqueezeBert](https://huggingface.co/transformers/model_doc/squeezebert.html)** released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[T5](https://huggingface.co/transformers/model_doc/t5.html)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/transformers/model_doc/tapas.html)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[Transformer-XL](https://huggingface.co/transformers/model_doc/transformerxl.html)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[Vision Transformer (ViT)](https://huggingface.co/transformers/model_doc/vit.html)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[Wav2Vec2](https://huggingface.co/transformers/model_doc/wav2vec2.html)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[XLM](https://huggingface.co/transformers/model_doc/xlm.html)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/transformers/model_doc/xlmprophetnet.html)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/transformers/model_doc/xlmroberta.html)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLNet](https://huggingface.co/transformers/model_doc/xlnet.html)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[Other community models](https://huggingface.co/models)**, contributed by the [community](https://huggingface.co/users).
1. **[XLSR-Wav2Vec2](https://huggingface.co/transformers/model_doc/xlsr_wav2vec2.html)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedback before starting your PR.

These implementations have been tested on several datasets (see the example scripts) and should match the performances of the original implementations. You can find more details on the performances in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).
To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/transformers/index.html#bigtable).

These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).
## Learn more
@@ -214,13 +269,17 @@ These implementations have been tested on several datasets (see the example scri

## Citation

We now have a [paper](https://arxiv.org/abs/1910.03771) you can cite for the 🤗 Transformers library:
We now have a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) you can cite for the 🤗 Transformers library:
```bibtex
@article{Wolf2019HuggingFacesTS,
  title={HuggingFace's Transformers: State-of-the-art Natural Language Processing},
  author={Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush},
  journal={ArXiv},
  year={2019},
  volume={abs/1910.03771}
@inproceedings{wolf-etal-2020-transformers,
    title = "Transformers: State-of-the-Art Natural Language Processing",
    author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = oct,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
    pages = "38--45"
}
```
@@ -53,7 +53,7 @@ RUN git clone https://github.com/huggingface/transformers.git && \
    git checkout CI && \
    cd .. && \
    pip install ./transformers && \
    pip install -r ./transformers/examples/requirements.txt && \
    pip install -r ./transformers/examples/pytorch/_test_requirements.txt && \
    pip install pytest

RUN python -c "import torch_xla; print(torch_xla.__version__)"
@@ -27,7 +27,7 @@ local bertBaseCased = base.BaseTest {
  },
  command: utils.scriptCommand(
    |||
      python -m pytest -s transformers/examples/test_xla_examples.py -v
      python -m pytest -s transformers/examples/pytorch/test_xla_examples.py -v
      test_exit_code=$?
      echo "\nFinished running commands.\n"
      test $test_exit_code -eq 0
@@ -1,3 +1,19 @@
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
@@ -10,7 +26,7 @@ pip install -e ".[docs]"
---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing for instance). You don't have to commit the built documentation.

---
@@ -49,7 +65,7 @@ make html
```

A folder called ``_build/html`` should have been created. You can now open the file ``_build/html/index.html`` in your
browser.

---
**NOTE**
@@ -79,15 +95,15 @@ following these steps:
  expand them).
- Click on "details" next to the `ci/circleci: build_doc` check.
- In the new window, click on the "Artifacts" tab.
- Locate the file "docs/_build/html/index.html" (or any specific page you want to check) and click on it to get a
  preview.

## Writing Documentation - Specification

The `huggingface/transformers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style. It is
mostly written in ReStructuredText
([Sphinx simple documentation](https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html),
[Sourceforge complete documentation](https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html)).
@@ -105,8 +121,8 @@ four.
### Adding a new model

When adding a new model:

- Create a file `xxx.rst` under `./source/model_doc` (don't hesitate to copy an existing file as template).
- Link that file in `./source/index.rst` on the `model_doc` toc-tree.
- Write a short overview of the model:
    - Overview with paper & authors
@@ -114,8 +130,8 @@ When adding a new model:
    - Tips and tricks and how to use it best
- Add the classes that should be linked in the model. This generally includes the configuration, the tokenizer, and
  every model of that class (the base model, alongside models with additional heads), both in PyTorch and TensorFlow.
  The order is generally:
    - Configuration,
    - Tokenizer
    - PyTorch base model
    - PyTorch head models
@@ -163,7 +179,7 @@ Links should be done as so (note the double underscore at the end): \`text for t

#### Defining arguments in a method

Arguments should be defined with the `Args:` prefix, followed by a line return and an indentation.
The argument should be followed by its type, with its shape if it is a tensor, and a line return.
Another indentation is necessary before writing the description of the argument.
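For illustration, a hypothetical method documented in this style (not taken from the diff) might look like:

```python
def resize(self, width, height=224):
    """
    Resizes the input image.

    Args:
        width (:obj:`int`):
            The target width in pixels.
        height (:obj:`int`, `optional`, defaults to 224):
            The target height in pixels. The indented description can span
            as many lines as needed.
    """
```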
@@ -200,9 +216,9 @@ then its documentation should look like this:

Note that we always omit the "defaults to :obj:\`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done like so:
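For example (an illustrative sketch, since the diff elides the doc's own snippet), a docstring can embed a code block with the ``Example::`` directive followed by an indented body:

```python
def from_pretrained(cls, name):
    """
    Loads a pretrained model.

    Example::

        model = BertModel.from_pretrained('bert-base-uncased')
        outputs = model(inputs)
    """
```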
@@ -221,7 +237,7 @@ the results stay consistent with the library.

#### Writing a return block

Return blocks should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.
@@ -242,3 +258,43 @@ Here's an example for a single value return:

Returns:
    :obj:`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```
#### Adding a new section

In ReST, section headers are designated with the help of a line of underline characters, e.g.:

```
Section 1
^^^^^^^^^^^^^^^^^^

Sub-section 1
~~~~~~~~~~~~~~~~~~
```

ReST allows the use of any characters to designate different section levels, as long as they are used consistently within the same document. For details see [sections doc](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections). Because there is no standard, different documents often end up using different characters for the same levels, which makes it very difficult to know which character to use when creating a new section.

Specifically, if you get an error like the following when running `make docs`:
```
docs/source/main_classes/trainer.rst:127:Title level inconsistent:
```
you picked an inconsistent character for some of the levels.

But how do you know which characters you must use for an already existing level or when adding a new level?

You can use this helper script:
```
perl -ne '/^(.)\1{100,}/ && do { $h{$1}=++$c if !$h{$1} }; END { %h = reverse %h ; print "$_ $h{$_}\n" for sort keys %h}' docs/source/main_classes/trainer.rst
1 -
2 ~
3 ^
4 =
5 "
```

This tells you which characters have already been assigned for each level.

So using this particular example's output -- if your current section's header uses `=` as its underline character, you now know you're at level 4, and if you want to add a sub-section header you know you want `"` as it'd be level 5.

If you need to add yet another sub-level, pick a character that is not used already. That is, you must pick a character that is not in the output of that script.

Here is the full list of characters that can be used in this context: `= - ` : ' " ~ ^ _ * + # < >`
@@ -2,6 +2,15 @@

/* Colab dropdown */

table.center-aligned-table td {
    text-align: center;
}

table.center-aligned-table th {
    text-align: center;
    vertical-align: middle;
}

.colab-dropdown {
    position: relative;
    display: inline-block;
@@ -1,14 +1,20 @@
// These two things need to be updated at each release for the version selector.
// Last stable version
const stableVersion = "v3.5.0"
const stableVersion = "v4.5.1"
// Dictionary doc folder to label
// Dictionary doc folder to label. The last stable version should have an empty key.
const versionMapping = {
  "master": "master",
  "": "v3.5.0/v3.5.1",
  "": "v4.5.0/v4.5.1 (stable)",
  "v4.4.2": "v4.4.0/v4.4.1/v4.4.2",
  "v4.3.3": "v4.3.0/v4.3.1/v4.3.2/v4.3.3",
  "v4.2.2": "v4.2.0/v4.2.1/v4.2.2",
  "v4.1.1": "v4.1.0/v4.1.1",
  "v4.0.1": "v4.0.0/v4.0.1",
  "v3.5.1": "v3.5.0/v3.5.1",
  "v3.4.0": "v3.4.0",
  "v3.3.1": "v3.3.0/v3.3.1",
  "v3.2.0": "v3.2.0",
  "v3.1.0": "v3.1.0 (stable)",
  "v3.1.0": "v3.1.0",
  "v3.0.2": "v3.0.0/v3.0.1/v3.0.2",
  "v2.11.0": "v2.11.0",
  "v2.10.0": "v2.10.0",
@@ -57,7 +63,7 @@ function addIcon() {
function addCustomFooter() {
  const customFooter = document.createElement("div");
  const questionOrIssue = document.createElement("div");
  questionOrIssue.innerHTML = "Stuck? Read our <a href='https://medium.com/huggingface'>Blog posts</a> or <a href='https://github.com/huggingface/transformers'>Create an issue</a>";
  questionOrIssue.innerHTML = "Stuck? Read our <a href='https://huggingface.co/blog'>Blog posts</a> or <a href='https://github.com/huggingface/transformers'>Create an issue</a>";
  customFooter.appendChild(questionOrIssue);
  customFooter.classList.add("footer");

@@ -124,11 +130,11 @@ function addVersionControl() {
  const parts = location.toString().split('/');
  let versionIndex = parts.length - 2;
  // Index page may not have a last part with filename.html so we need to go up
  if (parts[parts.length - 1] != "" && ! parts[parts.length - 1].match(/\.html$|^search.html?/)) {
  if (parts[parts.length - 1] != "" && ! parts[parts.length - 1].match(/\.html/)) {
    versionIndex = parts.length - 1;
  }
  // Main classes and models are nested so we need to go deeper
  else if (parts[versionIndex] == "main_classes" || parts[versionIndex] == "model_doc") {
  else if (parts[versionIndex] == "main_classes" || parts[versionIndex] == "model_doc" || parts[versionIndex] == "internal") {
    versionIndex = versionIndex - 1;
  }
  const version = parts[versionIndex];
844
docs/source/add_new_model.rst
Normal file
@@ -0,0 +1,844 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

How to add a model to 🤗 Transformers?
=======================================================================================================================
Adding a new model is often difficult and requires an in-depth knowledge of the 🤗 Transformers library and ideally also
of the model's original repository. At Hugging Face, we are trying to empower the community more and more to add models
independently. Thus, for some new models that the community wants to be added to 🤗 Transformers, we create a customized
*call-for-model-addition* that explains step-by-step how to add the requested model. With this
*call-for-model-addition*, we want to teach a motivated and experienced contributor of the community how to port a
model to 🤗 Transformers.

If this sounds like something you would be interested in, feel free to check out the currently open
“calls-for-model-addition” `here
<https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model/open_model_proposals/README.md>`__
and to contact us.

If selected, you will then work closely with one member of the Hugging Face team to integrate the model into 🤗
Transformers. By doing so, you will both gain a theoretical and deep practical understanding of the proposed model. But
more importantly, you will have made a major open-source contribution to 🤗 Transformers. Along the way, you will:

- get insights into open-source best practices
- understand the design principles of one of the most popular NLP libraries
- learn how to efficiently test large NLP models
- learn how to integrate Python utilities like ``black``, ``isort``, ``make fix-copies`` into a library to always
  ensure clean and readable code

We are also more than happy if you want to add a model that cannot be found in the “calls-for-model-addition” folder.
The following sections explain in detail how to add a new model. It might also be very helpful to check out already
added models to see if those resemble the model you would like to add `here
<https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed>`__.

To start, let's try to get a general overview of the Transformers library.
General overview of 🤗 Transformers
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so there is a
|
||||
chance that you don't agree with some of the library's philosophies or design choices. From our experience, however, we
|
||||
found that the fundamental design choices and philosophies of the library are crucial to efficiently scale 🤗
|
||||
Transformers while keeping maintenance costs at a reasonable level.
|
||||
|
||||
A good first starting point to better understand the library is to read the :doc:`documentation of our philosophy
|
||||
<philosophy>`. As a result of our way of working, there are some choices that we try to apply to all models:
|
||||
|
||||
- Composition is generally favored over-abstraction
|
||||
- Duplicating code is not always bad if it strongly improves the readability or accessibility of a model
|
||||
- Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only
|
||||
have to look into the respective ``modeling_....py`` file.
|
||||
|
||||
In our opinion, the library's code is not just a means to provide a product, *e.g.* the ability to use BERT for
|
||||
inference, but also as the very product that we want to improve. Hence, when adding a model, the user is not only the
|
||||
person that will use your model, but also everybody that will read, try to understand, and possibly tweak your code.
|
||||
|
||||
With this in mind, let's go a bit deeper into the general library design.
|
||||
Overview of models
-----------------------------------------------------------------------------------------------------------------------

To successfully add a model, it is important to understand the interaction between your model and its config,
:class:`~transformers.PreTrainedModel`, and :class:`~transformers.PretrainedConfig`. For exemplary purposes, we will
call the model to be added to 🤗 Transformers ``BrandNewBert``.

Let's take a look:

.. image:: ./imgs/transformers_overview.png
As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute
minimum. There are never more than two levels of abstraction for any model in the library. :obj:`BrandNewBertModel`
inherits from :obj:`BrandNewBertPreTrainedModel`, which in turn inherits from :class:`~transformers.PreTrainedModel`, and
that's it. As a general rule, we want to make sure that a new model only depends on
:class:`~transformers.PreTrainedModel`. The important functionalities that are automatically provided to every new
model are :meth:`~transformers.PreTrainedModel.from_pretrained` and
:meth:`~transformers.PreTrainedModel.save_pretrained`, which are used for serialization and deserialization. All of the
other important functionalities, such as :meth:`BrandNewBertModel.forward`, should be completely defined in the new
``modeling_brand_new_bert.py`` script. Next, we want to make sure that a model with a specific head layer, such as
:obj:`BrandNewBertForMaskedLM`, does not inherit from :obj:`BrandNewBertModel`, but rather uses :obj:`BrandNewBertModel`
as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a
configuration class, called :obj:`BrandNewBertConfig`. This configuration is always stored as an attribute in
:class:`~transformers.PreTrainedModel`, and thus can be accessed via the ``config`` attribute for all classes
inheriting from :obj:`BrandNewBertPreTrainedModel`:

.. code:: python

    model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
    model.config  # model has access to its config
Similar to the model, the configuration inherits basic serialization and deserialization functionalities from
:class:`~transformers.PretrainedConfig`. Note that the configuration and the model are always serialized into two
different formats - the model to a ``pytorch_model.bin`` file and the configuration to a ``config.json`` file. Calling
:meth:`~transformers.PreTrainedModel.save_pretrained` will automatically call
:meth:`~transformers.PretrainedConfig.save_pretrained`, so that both model and configuration are saved.
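
As a quick illustration, here is a minimal sketch of that round trip (``BrandNewBertModel`` and ``BrandNewBertConfig``
are the hypothetical classes used throughout this guide):

.. code:: python

    from transformers import BrandNewBertConfig, BrandNewBertModel  # hypothetical classes

    config = BrandNewBertConfig()  # default configuration
    model = BrandNewBertModel(config)  # randomly initialized weights

    # writes pytorch_model.bin and config.json into the folder
    model.save_pretrained("./brand_new_bert_checkpoint")

    # reads both files back; the config travels together with the weights
    reloaded = BrandNewBertModel.from_pretrained("./brand_new_bert_checkpoint")
    print(reloaded.config)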

Overview of tokenizers
-----------------------------------------------------------------------------------------------------------------------

Not quite ready yet :-( This section will be added soon!

Step-by-step recipe to add a model to 🤗 Transformers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Everyone has different preferences for how to port a model, so it can be very helpful to take a look at summaries
of how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model:

1. `Porting GPT2 Model <https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28>`__ by `Thomas
   <https://huggingface.co/thomwolf>`__
2. `Porting WMT19 MT Model <https://huggingface.co/blog/porting-fsmt>`__ by `Stas <https://huggingface.co/stas>`__

From experience, we can tell you that the most important things to keep in mind when adding a model are:

- Don't reinvent the wheel! Most parts of the code you will add for the new 🤗 Transformers model already exist
  somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy
  from. `grep <https://www.gnu.org/software/grep/>`__ and `rg <https://github.com/BurntSushi/ripgrep>`__ are your
  friends. Note that it might very well happen that your model's tokenizer is based on one model implementation, and
  your model's modeling code on another one. *E.g.* FSMT's modeling code is based on BART, while FSMT's tokenizer code
  is based on XLM.
- It's more of an engineering challenge than a scientific challenge. You should spend more time on creating an
  efficient debugging environment than on trying to understand all theoretical aspects of the model in the paper.
- Ask for help when you're stuck! Models are the core component of 🤗 Transformers, so we at Hugging Face are more
  than happy to help you at every step to add your model. Don't hesitate to ask if you notice you are not making
  progress.
In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers.

The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do
List:

- 1. ☐ (Optional) Understood theoretical aspects
- 2. ☐ Prepared transformers dev environment
- 3. ☐ Set up debugging environment of the original repository
- 4. ☐ Created script that successfully runs forward pass using original repository and checkpoint
- 5. ☐ Successfully added the model skeleton to Transformers
- 6. ☐ Successfully converted original checkpoint to Transformers checkpoint
- 7. ☐ Successfully ran forward pass in Transformers that gives identical output to original checkpoint
- 8. ☐ Finished model tests in Transformers
- 9. ☐ Successfully added Tokenizer in Transformers
- 10. ☐ Run end-to-end integration tests
- 11. ☐ Finished docs
- 12. ☐ Uploaded model weights to the hub
- 13. ☐ Submitted the pull request
- 14. ☐ (Optional) Added a demo notebook

To begin with, we usually recommend starting by getting a good theoretical understanding of ``BrandNewBert``. However,
if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive
into ``BrandNewBert``'s code-base. This option might suit you better if your engineering skills are better than
your theoretical skills, if you have trouble understanding ``BrandNewBert``'s paper, or if you just enjoy programming
much more than reading scientific papers.
1. (Optional) Theoretical aspects of BrandNewBert
-----------------------------------------------------------------------------------------------------------------------

You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large
sections of the paper that are difficult to understand. If this is the case, that is fine - don't worry! The goal is
not to get a deep theoretical understanding of the paper, but to extract the necessary information required to
effectively re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the
theoretical aspects, but rather focus on the practical ones, namely:

- What type of model is *brand_new_bert*? A BERT-like encoder-only model? A GPT2-like decoder-only model? A BART-like
  encoder-decoder model? Look at the :doc:`model_summary` if you're not familiar with the differences between those.
- What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.,*
  summarization?
- What is the novel feature of the model that makes it different from BERT/GPT-2/BART?
- Which of the already existing `🤗 Transformers models <https://huggingface.co/transformers/#contents>`__ is most
  similar to *brand_new_bert*?
- What type of tokenizer is used? A SentencePiece tokenizer? A WordPiece tokenizer? Is it the same tokenizer as used
  for BERT or BART?

After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the
Hugging Face team with any questions you might have. This might include questions regarding the model's architecture,
its attention layer, etc. We will be more than happy to help you.
2. Next prepare your environment
-----------------------------------------------------------------------------------------------------------------------

1. Fork the `repository <https://github.com/huggingface/transformers>`__ by clicking on the ‘Fork' button on the
   repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your ``transformers`` fork to your local disk, and add the base repository as a remote:

   .. code:: bash

      git clone https://github.com/[your Github handle]/transformers.git
      cd transformers
      git remote add upstream https://github.com/huggingface/transformers.git

3. Set up a development environment, for instance by running the following commands:

   .. code:: bash

      python -m venv .env
      source .env/bin/activate
      pip install -e ".[dev]"

   and return to the parent directory:

   .. code:: bash

      cd ..

4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the
   instructions on https://pytorch.org/get-started/locally/.

   **Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.

5. To port *brand_new_bert*, you will also need access to its original repository:

   .. code:: bash

      git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
      cd brand_new_bert
      pip install -e .

Now you have set up a development environment to port *brand_new_bert* to 🤗 Transformers.
3.-4. Run a pretrained checkpoint using the original repository
-----------------------------------------------------------------------------------------------------------------------

At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very
“researchy”, meaning that documentation might be lacking and the code can be difficult to understand. But this should
be exactly your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to *make people
stand on the shoulders of giants*, which translates here very well into taking a working model and rewriting it to make
it as **accessible, user-friendly, and beautiful** as possible. This is the number-one motivation to re-implement
models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**.

You should therefore start by diving into the original repository.

Successfully running the official pretrained model in the original repository is often **the most difficult** step.
From our experience, it is very important to spend some time getting familiar with the original code-base. You need to
figure out the following:

- Where to find the pretrained weights?
- How to load the pretrained weights into the corresponding model?
- How to run the tokenizer independently from the model?
- Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually,
  you only have to reimplement those functions.
- Be able to locate the important components of the model: Where is the model's class? Are there model sub-classes,
  *e.g.* EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers,
  *e.g.* *self-attention*, *cross-attention*...?
- How can you debug the model in the original environment of the repo? Do you have to add ``print`` statements, can you
  work with an interactive debugger like ``ipdb``, or should you use an efficient IDE to debug the model, like PyCharm?

It is very important that, before you start the porting process, you can **efficiently** debug code in the original
repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or
even a pull request, in the original repository. The maintainers of this repository are most likely very happy about
someone looking into their code!
At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original
model. We strongly advise against setting up a costly GPU environment; instead, simply work on a CPU both when starting to
dive into the original repository and when starting to write the 🤗 Transformers implementation of the model. Only
at the very end, when the model has already been successfully ported to 🤗 Transformers, should you verify that the
model also works as expected on GPU.

In general, there are two possible debugging environments for running the original model:

- `Jupyter notebooks <https://jupyter.org/>`__ / `google colab
  <https://colab.research.google.com/notebooks/intro.ipynb>`__
- Local Python scripts.
Jupyter notebooks have the advantage that they allow for cell-by-cell execution, which can be helpful to better split
logical components from one another and to have faster debugging cycles, as intermediate results can be stored. Also,
notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging
Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them.

The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them, you will have to spend
some time adjusting to the new programming environment, and you might not be able to use your known debugging tools
anymore, like ``ipdb``.

For each code-base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a
single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in
pseudocode):

.. code:: python

    model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
    input_ids = [0, 4, 5, 2, 3, 7, 9]  # vector of input ids
    original_output = model.predict(input_ids)
Next, regarding the debugging strategy, there are generally a few to choose from:

- Decompose the original model into many small testable components and run a forward pass on each of those for
  verification
- Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on
  those, and use intermediate print statements or breakpoints for verification

Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code
base.

If the original code-base allows you to decompose the model into smaller sub-components, *e.g.* if the original
code-base can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages
to taking the more difficult road in the beginning:

- at a later stage, when comparing the original model to the Hugging Face implementation, you can verify automatically
  for each component individually that the corresponding component of the 🤗 Transformers implementation matches, instead
  of relying on visual comparison via print statements
- it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting
  individual components and thus structure your work better
- separating the model into logically meaningful components will help you to get a better overview of the model's design
  and thus to better understand the model
- at a later stage, those component-by-component tests help you to ensure that no regression occurs as you continue
  changing your code

`Lysandre's integration checks for ELECTRA
<https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed>`__ give a nice example of how this can be done.
However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode,
it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good
example is `T5's MeshTensorFlow <https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow>`__ library, which is
very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one
often relies on verifying print statements.

No matter which strategy you choose, the recommended procedure is often the same in that you should start to debug the
starting layers first and the ending layers last.

It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following
layers in the following order:

1. Retrieve the input IDs passed to the model
2. Retrieve the word embeddings
3. Retrieve the input of the first Transformer layer
4. Retrieve the output of the first Transformer layer
5. Retrieve the output of the following n - 1 Transformer layers
6. Retrieve the output of the whole BrandNewBert Model

The input IDs should consist of an array of integers, *e.g.* ``input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]``

The outputs of the following layers often consist of multi-dimensional float arrays and can look like this:

.. code:: bash

    [[
     [-0.1465, -0.6501,  0.1993,  ...,  0.1451,  0.3430,  0.6024],
     [-0.4417, -0.5920,  0.3450,  ..., -0.3062,  0.6182,  0.7132],
     [-0.5009, -0.7122,  0.4548,  ..., -0.3662,  0.6091,  0.7648],
     ...,
     [-0.5613, -0.6332,  0.4324,  ..., -0.3792,  0.7372,  0.9288],
     [-0.5416, -0.6345,  0.4180,  ..., -0.3564,  0.6992,  0.9191],
     [-0.5334, -0.6403,  0.4271,  ..., -0.3339,  0.6533,  0.8694]]],
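
If the original model is written in PyTorch, one way to collect such intermediate outputs without modifying the model's
code is to register forward hooks. The sketch below is only an illustration: the module-name filter is an assumption,
and you should first list the real module paths with ``model.named_modules()``.

.. code:: python

    import torch

    captured = {}

    def make_hook(name):
        def hook(module, inputs, output):
            captured[name] = output  # store this layer's output for later comparison
        return hook

    # placeholder filter - inspect `model.named_modules()` to pick the right layers
    for name, module in model.named_modules():
        if name.endswith("embeddings") or "layer.0" in name:
            module.register_forward_hook(make_hook(name))

    with torch.no_grad():
        model(torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]]))

    for name, value in captured.items():
        print(name, tuple(value.shape) if torch.is_tensor(value) else type(value))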

We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original
model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001!
Since it is normal that the exact same model written in different libraries can give a slightly different output
depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives
nearly the same output; the outputs have to be almost identical. Therefore, you will certainly compare the intermediate
outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of
*brand_new_bert*, in which case an **efficient** debugging environment of the original repository is absolutely
crucial. Here is some advice to make your debugging environment as efficient as possible.

- Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should
  probably take the time to write a longer script that decomposes the original model into smaller sub-components to
  retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on
  TensorFlow print operations like `tf.print <https://www.tensorflow.org/api_docs/python/tf/print>`__ to output
  intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when
  running the forward pass, *e.g.* check out `this link <https://github.com/google/jax/issues/196>`__.
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle
  becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds.
  In case only very large checkpoints are available, it might make more sense to create a dummy model in the new
  environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version
  of your model
- Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to
  find the function in the original repository that **only** calls a single forward pass, *i.e.* that is often called
  ``predict``, ``evaluate``, ``forward`` or ``__call__``. You don't want to debug a function that calls ``forward``
  multiple times, *e.g.* to generate text, like ``autoregressive_sample`` or ``generate``.
- Try to separate the tokenization from the model's ``forward`` pass. If the original repository shows examples where
  you have to input a string, then try to find out where in the forward call the string input is changed to input ids
  and start from this point. This might mean that you possibly have to write a small script yourself or change the
  original code so that you can directly input the ids instead of an input string.
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield
  random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging
  environment is **deterministic** so that the dropout layers are not used, or use ``transformers.file_utils.set_seed``
  if the old and new implementations are in the same framework (see the short sketch after this list).
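
A minimal sketch of such a deterministic setup in PyTorch, assuming ``model`` and ``input_ids`` are already defined:

.. code:: python

    import torch
    from transformers import set_seed  # seeding helper mentioned above

    set_seed(42)           # make any remaining random ops reproducible across runs
    model.eval()           # disable dropout and other train-only behavior
    with torch.no_grad():  # no autograd bookkeeping needed while debugging
        output = model(input_ids)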

The following section gives you more specific details/tips on how you can do this for *brand_new_bert*.

5.-14. Port BrandNewBert to 🤗 Transformers
-----------------------------------------------------------------------------------------------------------------------

Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers fork:

::

    cd transformers

In the special case that you are adding a model whose architecture exactly matches the model architecture of an
existing model, you only have to add a conversion script as described in `this section <#write-a-conversion-script>`__.
In this case, you can just re-use the whole model architecture of the already existing model.

Otherwise, let's start generating a new model with the amazing Cookiecutter!

**Use the Cookiecutter to automatically generate the model's code**

To begin with, head over to the `🤗 Transformers templates
<https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model>`__ to make use of our
``cookiecutter`` implementation to automatically generate all the relevant files for your model. Again, we recommend
only adding the PyTorch version of the model at first. Make sure you follow the instructions of the ``README.md`` on
the `🤗 Transformers templates <https://github.com/huggingface/transformers/tree/master/templates/adding_a_new_model>`__
page carefully.

**Open a Pull Request on the main huggingface/transformers repo**

Before starting to adapt the automatically generated code, now is the time to open a “Work in progress (WIP)” pull
request, *e.g.* “[WIP] Add *brand_new_bert*”, in 🤗 Transformers, so that you and the Hugging Face team can work
side-by-side on integrating the model into 🤗 Transformers.

You should do the following:

1. Create a branch with a descriptive name from your master branch

   ::

      git checkout -b add_brand_new_bert

2. Commit the automatically generated code:

   ::

      git add .
      git commit

3. Fetch and rebase to current master

   ::

      git fetch upstream
      git rebase upstream/master

4. Push the changes to your account using:

   ::

      git push -u origin add_brand_new_bert

5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the
   GitHub handles of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified of
   future changes.

6. Change the PR into a draft by clicking on “Convert to draft” on the right of the GitHub pull request web page.

In the following, whenever you have made some progress, don't forget to commit your work and push it to your account so
that it shows in the pull request. Additionally, you should make sure to update your work with the current master from
time to time by doing:

::

    git fetch upstream
    git merge upstream/master

In general, all questions you might have regarding the model or your implementation should be asked in your PR and
discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or
if you have a question. It is often very helpful to point the Hugging Face team to your added code so that the Hugging
Face team can efficiently understand your problem or question.

To do so, you can go to the “Files changed” tab where you see all of your changes, go to a line regarding which you
want to ask a question, and click on the “+” symbol to add a comment. Whenever a question or problem has been solved,
you can click on the “Resolve” button of the created comment.

In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions
on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the
Hugging Face team by Slack or email.

**5. Adapt the generated model's code for brand_new_bert**

At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be
found in the generated files ``src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`` and
``src/transformers/models/brand_new_bert/configuration_brand_new_bert.py``.

Now you can finally start coding :). The generated code in
``src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`` will either have the same architecture as BERT if
it's an encoder-only model or BART if it's an encoder-decoder model. At this point, you should remind yourself what
you've learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or
BART?* Implement those changes, which often means changing the *self-attention* layer, the order of the normalization
layers, etc… Again, it is often useful to look at the similar architecture of already existing models in Transformers to
get a better feeling of how your model should be implemented.

**Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is
advised to add a first *unclean*, copy-pasted version of the original code to
``src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`` until you feel like all the necessary code is
added. From our experience, it is much more efficient to quickly add a first version of the required code and
improve/correct the code iteratively with the conversion script as described in the next section. The only thing that
has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the
following command should work:

.. code:: python

    from transformers import BrandNewBertModel, BrandNewBertConfig
    model = BrandNewBertModel(BrandNewBertConfig())

The above command will create a model according to the default parameters as defined in ``BrandNewBertConfig()`` with
random weights, thus making sure that the ``init()`` methods of all components work.

**6. Write a conversion script**

Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in
the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of
*brand_new_bert*. It is not advised to write the conversion script from scratch, but rather to look through already
existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in
the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and
slightly adapt it for your use case. Don't hesitate to ask the Hugging Face team to point you to a similar already
existing conversion script for your model.

- If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion script `here
  <https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91>`__
- If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script `here
  <https://github.com/huggingface/transformers/blob/master/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py>`__

In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the
name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in
PyTorch, called ``SimpleModel``, as follows:

.. code:: python

    import torch.nn as nn

    class SimpleModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.dense = nn.Linear(10, 10)
            self.intermediate = nn.Linear(10, 10)
            self.layer_norm = nn.LayerNorm(10)

Now we can create an instance of this model definition, which will fill the ``dense``, ``intermediate``, and
``layer_norm`` layers with random weights. We can print the model to see its architecture:

.. code:: python

    model = SimpleModel()

    print(model)

This will print out the following:

.. code:: bash

    SimpleModel(
      (dense): Linear(in_features=10, out_features=10, bias=True)
      (intermediate): Linear(in_features=10, out_features=10, bias=True)
      (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
    )

We can see that the layer names are defined by the names of the class attributes in PyTorch. You can print out the weight
values of a specific layer:

.. code:: python

    print(model.dense.weight.data)

to see that the weights were randomly initialized:

.. code:: bash

    tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212,
             -0.2077,  0.2157],
            [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,
              0.2166, -0.0212],
            [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950,
             -0.1023, -0.0447],
            [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415,
             -0.1876, -0.2467],
            [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,
              0.2577,  0.0402],
            [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,
              0.2132,  0.1680],
            [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,
              0.2707, -0.2509],
            [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,
              0.1829, -0.1568],
            [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,
              0.0333, -0.0536],
            [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,
              0.2220,  0.2358]])

In the conversion script, you should fill those randomly initialized weights with the exact weights of the
corresponding layer in the checkpoint. *E.g.*

.. code:: python

    # retrieve matching layer weights, e.g. by
    # recursive algorithm
    layer_name = "dense"
    pretrained_weight = array_of_dense_layer

    model_pointer = getattr(model, "dense")

    model_pointer.weight.data = torch.from_numpy(pretrained_weight)

While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding
pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert
statements for the shape and print out the names of the checkpoint weights. *E.g.* you should add statements like:

.. code:: python

    assert (
        model_pointer.weight.shape == pretrained_weight.shape
    ), f"Pointer shape of random weight {model_pointer.weight.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"

Besides, you should also print out the names of both weights to make sure they match, *e.g.*

.. code:: python

    logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")

If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly
initialized layer of the 🤗 Transformers implementation.

An incorrect shape is most likely due to an incorrect setting of the config parameters in ``BrandNewBertConfig()`` that
do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that
PyTorch's implementation of a layer requires the weight to be transposed beforehand.

Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that
were not used for initialization to make sure the model is correctly converted. It is completely normal that the
conversion trials fail with either a wrong shape statement or a wrong name assignment. This is most likely because
either you used incorrect parameters in ``BrandNewBertConfig()``, have a wrong architecture in the 🤗 Transformers
implementation, have a bug in the ``init()`` functions of one of the components of the 🤗 Transformers
implementation, or need to transpose one of the checkpoint weights.
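
A minimal sketch of such bookkeeping over the original checkpoint - the identity name-mapping below is a placeholder,
since real conversion scripts explicitly translate original weight names to 🤗 Transformers names:

.. code:: python

    import torch

    state_dict = torch.load("/path/to/original/checkpoint.bin")  # original weights as a dict
    unused_keys = set(state_dict.keys())

    for name, parameter in model.named_parameters():
        original_name = name  # placeholder: real scripts map the name here
        if original_name not in state_dict:
            raise ValueError(f"No checkpoint weight found for {name}")
        weight = state_dict[original_name]
        assert parameter.shape == weight.shape, f"Shape mismatch for {name}"
        parameter.data = weight
        unused_keys.discard(original_name)

    # anything left here was never used - a strong hint that the conversion is incomplete
    print("Checkpoint weights that were not used:", unused_keys)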

This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the
Transformers model. Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save
the model under a folder of your choice ``/path/to/converted/checkpoint/folder`` that should then contain both a
``pytorch_model.bin`` file and a ``config.json`` file:

.. code:: python

    model.save_pretrained("/path/to/converted/checkpoint/folder")

**7. Implement the forward pass**

Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make
sure that the forward pass is correctly implemented. In `Get familiar with the original repository
<#run-a-pretrained-checkpoint-using-the-original-repository>`__, you have already created a script that runs a forward
pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers
implementation instead of the original one. It should look as follows:

.. code:: python

    import torch

    model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
    input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])  # batch of size 1
    output = model(input_ids).last_hidden_state

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact
same output the very first time, or that the forward pass throws an error. Don't be disappointed - it's expected! First,
you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are
used, leading to a *Dimensionality mismatch* error, or that the wrong data type is used, *e.g.* ``torch.long``
instead of ``torch.float32``. Don't hesitate to ask the Hugging Face team for help if you don't manage to solve
certain errors.

The final part of making sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are
equivalent to a precision of ``1e-3``. First, you should ensure that the output shapes are identical, *i.e.*
``outputs.shape`` should yield the same value for the script of the 🤗 Transformers implementation and the original
implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult
parts of adding a new model. Common reasons why the outputs are not identical are:

- Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass. To fix this, make sure ``model.training is False`` and that no dropout
  layer is falsely activated during the forward pass, *i.e.* pass ``self.training`` to `PyTorch's functional dropout
  <https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout>`_

The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗
Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out
intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗
Transformers implementation shows a different output than the original implementation. First, make sure that the
hard-coded ``input_ids`` in both scripts are identical. Next, verify that the outputs of the first transformation of
the ``input_ids`` (usually the word embeddings) are identical. And then work your way up to the very last layer of the
network. At some point, you will notice a difference between the two implementations, which should point you to the bug
in the 🤗 Transformers implementation. From our experience, a simple and efficient way is to add many print statements
in both the original implementation and the 🤗 Transformers implementation, at the same positions in the network
respectively, and to successively remove print statements showing the same values for intermediate representations.

When you're confident that both implementations yield the same output, verify the outputs with
``torch.allclose(original_output, output, atol=1e-3)``, and you're done with the most difficult part! Congratulations - the
work left to be done should be a cakewalk 😊.
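
Put together, a minimal sketch of that final comparison - assuming you saved the original model's output to disk while
debugging the original repository, for example with ``numpy.save``:

.. code:: python

    import numpy as np
    import torch

    # saved earlier from the original repository's forward pass
    original_output = torch.from_numpy(np.load("original_output.npy"))

    print("Max absolute difference:", (original_output - output).abs().max().item())
    assert torch.allclose(original_output, output, atol=1e-3), "Outputs diverge by more than 1e-3"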

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet
fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all
common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under
``tests/test_modeling_brand_new_bert.py``. Run this test file to verify that all common tests pass:

.. code:: bash

    pytest tests/test_modeling_brand_new_bert.py

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) The community can easily understand your work by looking at specific tests of *brand_new_bert*
- b) Future changes to your model will not break any important feature of the model.

At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts
you used earlier to implement the model in 🤗 Transformers. A template of those model tests is already added by the
Cookiecutter, called ``BrandNewBertModelIntegrationTests``, and only has to be filled out by you. To ensure that those
tests are passing, run

.. code:: bash

    RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests

.. note::

   In case you are using Windows, you should replace ``RUN_SLOW=1`` with ``SET RUN_SLOW=1``
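
A hedged sketch of what a filled-out integration test can look like - the checkpoint name and the expected values below
are placeholders that you would replace with real numbers taken from the original implementation:

.. code:: python

    import unittest

    import torch

    from transformers import BrandNewBertModel  # hypothetical model class


    class BrandNewBertModelIntegrationTests(unittest.TestCase):
        def test_inference_no_head(self):
            model = BrandNewBertModel.from_pretrained("author/brand_new_bert")  # placeholder checkpoint
            input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])
            with torch.no_grad():
                output = model(input_ids).last_hidden_state
            # placeholder values - copy the real ones from the original implementation
            expected_slice = torch.tensor(
                [[[-0.1465, -0.6501, 0.1993], [-0.4417, -0.5920, 0.3450], [-0.5009, -0.7122, 0.4548]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))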

Second, all features that are special to *brand_new_bert* should be tested additionally in a separate test under
``BrandNewBertModelTester``/``BrandNewBertModelTest``. This part is often forgotten but is extremely useful in two
ways:

- It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the
  special features of *brand_new_bert* should work.
- Future contributors can quickly test changes to the model by running those special tests.

**9. Implement the tokenizer**

Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent or very similar to an
already existing tokenizer of 🤗 Transformers.

It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗
Transformers implementation of the tokenizer.

To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository
that inputs a string and returns the ``input_ids``. It could look similar to this (in pseudo-code):

.. code:: python

    input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
    model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
    input_ids = model.tokenize(input_str)

You might have to take a deeper look again into the original repository to find the correct tokenizer function, or you
might even have to make changes to your clone of the original repository to only output the ``input_ids``. Having written
a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be
created. It should look similar to this:

.. code:: python

    from transformers import BrandNewBertTokenizer

    input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."

    tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

    input_ids = tokenizer(input_str).input_ids

When both ``input_ids`` yield the same values, as a final step a tokenizer test file should also be added.

Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should
contain a couple of hard-coded integration tests.
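
A hedged sketch of such a hard-coded tokenizer test - the expected ids below are placeholders that you would replace
with the output of your original-repository script:

.. code:: python

    import unittest

    from transformers import BrandNewBertTokenizer  # hypothetical tokenizer class


    class BrandNewBertTokenizationTest(unittest.TestCase):
        def test_tokenizer_integration(self):
            tokenizer = BrandNewBertTokenizer.from_pretrained("author/brand_new_bert")  # placeholder
            input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
            expected_ids = [0, 312, 83, 8, 404, 2872, 234, 12, 2]  # placeholder values
            self.assertListEqual(tokenizer(input_str).input_ids, expected_ids)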

**10. Run end-to-end integration tests**

Having added the tokenizer, you should also add a couple of end-to-end integration tests that use both the model and the
tokenizer to ``tests/test_modeling_brand_new_bert.py`` in 🤗 Transformers. Such a test should show on a meaningful
text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can
include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none
of the ported checkpoints has been fine-tuned on a downstream task, it is enough to simply rely on the model tests. As a
final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can
happen that you forgot to add some ``.to(self.device)`` statements to internal tensors of the model, which in such a
test would show up as an error. In case you have no access to a GPU, the Hugging Face team can take care of running those
tests for you.
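
One way to catch missing ``.to(self.device)`` calls early is to route every tensor your test creates through
``torch_device`` from ``transformers.testing_utils``, which resolves to ``"cuda"`` when a GPU is available and to
``"cpu"`` otherwise. A minimal sketch, assuming ``model`` is already loaded:

.. code:: python

    import torch

    from transformers.testing_utils import torch_device  # "cuda" if available, else "cpu"

    model = model.to(torch_device)
    input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]], device=torch_device)
    with torch.no_grad():
        # fails here if an internal tensor was created on CPU by mistake
        output = model(input_ids).last_hidden_state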

**11. Add Docstring**

Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is
a nice docstring and a doc page. The Cookiecutter should have added a template file called
``docs/source/model_doc/brand_new_bert.rst`` that you should fill out. Users of your model will usually first look at
this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for
the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team
regarding the docstrings.

Next, make sure that the docstring added to ``src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`` is
correct and includes all necessary inputs and outputs. It is always good to remind oneself that documentation should
be treated at least as carefully as the code in 🤗 Transformers, since the documentation is usually the first contact
point of the community with the model.

**Code refactor**

Great, now you have added all the necessary code for *brand_new_bert*. At this point, you should fix any potentially
incorrect code style by running:

.. code:: bash

    make style

and verify that your coding style passes the quality check:

.. code:: bash

    make quality

There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which will show up in
the tests of your pull request. This is often because of some missing information in the docstring or some incorrect
naming. The Hugging Face team will surely help you if you're stuck here.

Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all
tests passing, now is a good time to go over the added code again and do some refactoring.

You have now finished the coding part, congratulations! 🎉 You are awesome! 😎

**12. Upload the models to the model hub**

In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each
uploaded model checkpoint. You should work alongside the Hugging Face team here to decide on a fitting name for each
checkpoint and to get the required access rights to be able to upload the model under the author's organization of
*brand_new_bert*.

It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the
specific characteristics of this particular checkpoint, *e.g.* On which dataset was the checkpoint
pretrained/fine-tuned? On what down-stream task should the model be used? They should also include some code on how to
correctly use the model.

**13. (Optional) Add notebook**

It is very helpful to add a notebook that showcases in detail how *brand_new_bert* can be used for inference and/or
fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.

**14. Submit your finished PR**

You're done programming now and can move to the last step, which is getting your PR merged into master. Usually, the
Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished
PR a nice description and possibly add comments to your code, if you want to point out certain design choices to your
reviewer.

Share your work!!
-----------------------------------------------------------------------------------------------------------------------

Now, it's time to get some credit from the community for your work! Having completed a model addition is a major
contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be
used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share
your achievement with the community.

**You have made another model that is super easy to access for everyone in the community! 🤯**
@@ -1,10 +1,22 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 Benchmarks
 =======================================================================================================================

 Let's take a look at how 🤗 Transformer models can be benchmarked, best practices, and already available benchmarks.

-A notebook explaining in more detail how to benchmark 🤗 Transformer models can be found `here
-<https://github.com/huggingface/transformers/blob/master/notebooks/05-benchmark.ipynb>`__.
+A notebook explaining in more detail how to benchmark 🤗 Transformer models can be found :prefix_link:`here
+<notebooks/05-benchmark.ipynb>`.

 How to benchmark 🤗 Transformer models
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -53,10 +65,10 @@ respectively.
 .. code-block:: bash

     ## PYTORCH CODE
-    python examples/benchmarking/run_benchmark.py --help
+    python examples/pytorch/benchmarking/run_benchmark.py --help

     ## TENSORFLOW CODE
-    python examples/benchmarking/run_benchmark_tf.py --help
+    python examples/tensorflow/benchmarking/run_benchmark_tf.py --help


 An instantiated benchmark object can then simply be run by calling ``benchmark.run()``.
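
For context, a minimal sketch of how such a benchmark object is created and run with the library's benchmark utilities
(model names and sizes are just examples):

.. code:: python

    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    args = PyTorchBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
    )
    benchmark = PyTorchBenchmark(args)
    results = benchmark.run()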

@@ -87,6 +99,7 @@ An instantiated benchmark object can then simply be run by calling ``benchmark.r
 --------------------------------------------------------------------------------

 ==================== ENVIRONMENT INFORMATION ====================

 - transformers_version: 2.11.0
 - framework: PyTorch
 - use_torchscript: False

@@ -133,6 +146,7 @@ An instantiated benchmark object can then simply be run by calling ``benchmark.r
 --------------------------------------------------------------------------------

 ==================== ENVIRONMENT INFORMATION ====================

 - transformers_version: 2.11.0
 - framework: Tensorflow
 - use_xla: False

@@ -216,6 +230,7 @@ configurations must be inserted with the benchmark args as follows.
 --------------------------------------------------------------------------------

 ==================== ENVIRONMENT INFORMATION ====================

 - transformers_version: 2.11.0
 - framework: PyTorch
 - use_torchscript: False

@@ -285,6 +300,7 @@ configurations must be inserted with the benchmark args as follows.
 --------------------------------------------------------------------------------

 ==================== ENVIRONMENT INFORMATION ====================

 - transformers_version: 2.11.0
 - framework: Tensorflow
 - use_xla: False

@@ -341,5 +357,5 @@ The approach is detailed in the `following blogpost
 available `here
 <https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing>`__.

-With the new `benchmark` tools, it is easier than ever to share your benchmark results with the community `here
-<https://github.com/huggingface/transformers/blob/master/examples/benchmarking/README.md>`__.
+With the new `benchmark` tools, it is easier than ever to share your benchmark results with the community
+:prefix_link:`here <examples/benchmarking/README.md>`.
@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 BERTology
 -----------------------------------------------------------------------------------------------------------------------

@@ -21,6 +33,6 @@ help people access the inner representations, mainly adapted from the great work
 * retrieving heads output values and gradients to be able to compute head importance score and prune head as explained
   in https://arxiv.org/abs/1905.10650.

-To help you understand and use these features, we have added a specific example script: `bertology.py
-<https://github.com/huggingface/transformers/blob/master/examples/bertology/run_bertology.py>`_ which extracts
-information and prunes a model pre-trained on GLUE.
+To help you understand and use these features, we have added a specific example script: :prefix_link:`bertology.py
+<examples/research_projects/bertology/run_bertology.py>`, which extracts information and prunes a model pre-trained on
+GLUE.
||||
|
58
docs/source/community.md
Normal file
58
docs/source/community.md
Normal file
@ -0,0 +1,58 @@
|
||||
# Community
|
||||
|
||||
This page regroups resources around 🤗 Transformers developed by the community.
|
||||
|
||||
## Community resources:
|
||||
|
||||
| Resource | Description | Author |
|
||||
|:----------|:-------------|------:|
|
||||
| [Hugging Face Transformers Glossary Flashcards](https://www.darigovresearch.com/huggingface-transformers-glossary-flashcards) | A set of flashcards based on the [Transformers Docs Glossary](https://huggingface.co/transformers/master/glossary.html) that has been put into a form which can be easily learnt/revised using [Anki ](https://apps.ankiweb.net/) an open source, cross platform app specifically designed for long term knowledge retention. See this [Introductory video on how to use the flashcards](https://www.youtube.com/watch?v=Dji_h7PILrw). | [Darigov Research](https://www.darigovresearch.com/) |
## Community notebooks:
| Notebook | Description | Author | |
|:----------|:-------------|:-------------|------:|
| [Train T5 in Tensorflow 2 ](https://github.com/snapthat/TF-T5-text-to-text) | How to train T5 for any task using Tensorflow 2. This notebook demonstrates a Question & Answer task implemented in Tensorflow 2 using SQUAD | [Muhammad Harris](https://github.com/HarrisDePerceptron) |[](https://colab.research.google.com/github/snapthat/TF-T5-text-to-text/blob/master/snapthatT5/notebooks/TF-T5-Datasets%20Training.ipynb) |
| [Train T5 on TPU](https://github.com/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb) | How to train T5 on SQUAD with Transformers and Nlp | [Suraj Patil](https://github.com/patil-suraj) |[](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb#scrollTo=QLGiFCDqvuil) |
| [Fine-tune T5 for Classification and Multiple Choice](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | How to fine-tune T5 for classification and multiple choice tasks using a text-to-text format with PyTorch Lightning | [Suraj Patil](https://github.com/patil-suraj) | [](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) |
| [Fine-tune DialoGPT on New Datasets and Languages](https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | How to fine-tune the DialoGPT model on a new dataset for open-dialog conversational chatbots | [Nathan Cooper](https://github.com/ncoop57) | [](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) |
| [Long Sequence Modeling with Reformer](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | How to train on sequences as long as 500,000 tokens with Reformer | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) |
| [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) | How to fine-tune BART for summarization with fastai using blurr | [Wayde Gilliam](https://ohmeow.com/) | [](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/_notebooks/2020-05-23-text-generation-with-blurr.ipynb) |
| [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | How to generate tweets in the style of your favorite Twitter account by fine-tuning a GPT-2 model | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) |
| [Optimize 🤗 Hugging Face models with Weights & Biases](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | A complete tutorial showcasing W&B integration with Hugging Face | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) |
| [Pretrain Longformer](https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | How to build a "long" version of existing pretrained models | [Iz Beltagy](https://beltagy.net) | [](https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) |
| [Fine-tune Longformer for QA](https://github.com/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | How to fine-tune longformer model for QA task | [Suraj Patil](https://github.com/patil-suraj) | [](https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) |
| [Evaluate Model with 🤗nlp](https://github.com/patrickvonplaten/notebooks/blob/master/How_to_evaluate_Longformer_on_TriviaQA_using_NLP.ipynb) | How to evaluate longformer on TriviaQA with `nlp` | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/drive/1m7eTGlPmLRgoPkkA7rkhQdZ9ydpmsdLE?usp=sharing) |
| [Fine-tune T5 for Sentiment Span Extraction](https://github.com/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) | How to fine-tune T5 for sentiment span extraction using a text-to-text format with PyTorch Lightning | [Lorenzo Ampil](https://github.com/enzoampil) | [](https://colab.research.google.com/github/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) |
| [Fine-tune DistilBert for Multiclass Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb) | How to fine-tune DistilBert for multiclass classification with PyTorch | [Abhishek Kumar Mishra](https://github.com/abhimishra91) | [](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb)|
|[Fine-tune BERT for Multi-label Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|How to fine-tune BERT for multi-label classification using PyTorch|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|
|[Fine-tune T5 for Summarization](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|How to fine-tune T5 for summarization in PyTorch and track experiments with WandB|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|
|[Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing](https://github.com/ELS-RD/transformers-notebook/blob/master/Divide_Hugging_Face_Transformers_training_time_by_2_or_more.ipynb)|How to speed up fine-tuning by a factor of 2 using dynamic padding / bucketing|[Michael Benesty](https://github.com/pommedeterresautee) |[](https://colab.research.google.com/drive/1CBfRU1zbfu7-ijiOqAAQUA-RJaxfcJoO?usp=sharing)|
|[Pretrain Reformer for Masked Language Modeling](https://github.com/patrickvonplaten/notebooks/blob/master/Reformer_For_Masked_LM.ipynb)| How to train a Reformer model with bi-directional self-attention layers | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/drive/1tzzh0i8PgDQGV3SMFUGxM7_gGae3K-uW?usp=sharing)|
|[Expand and Fine Tune Sci-BERT](https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/COVID-SciBERT.ipynb)| How to increase vocabulary of a pretrained SciBERT model from AllenAI on the CORD dataset and pipeline it. | [Tanmay Thakur](https://github.com/lordtt13) | [](https://colab.research.google.com/drive/1rqAR40goxbAfez1xvF3hBJphSCsvXmh8)|
|[Fine Tune BlenderBotSmall for Summarization using the Trainer API](https://github.com/lordtt13/transformers-experiments/blob/master/Custom%20Tasks/fine-tune-blenderbot_small-for-summarization.ipynb)| How to fine tune BlenderBotSmall for summarization on a custom dataset, using the Trainer API. | [Tanmay Thakur](https://github.com/lordtt13) | [](https://colab.research.google.com/drive/19Wmupuls7mykSGyRN_Qo6lPQhgp56ymq?usp=sharing)|
|[Fine-tune Electra and interpret with Integrated Gradients](https://github.com/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb) | How to fine-tune Electra for sentiment analysis and interpret predictions with Captum Integrated Gradients | [Eliza Szczechla](https://elsanns.github.io) | [](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb)|
|[fine-tune a non-English GPT-2 Model with Trainer class](https://github.com/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb) | How to fine-tune a non-English GPT-2 Model with Trainer class | [Philipp Schmid](https://www.philschmid.de) | [](https://colab.research.google.com/github/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb)|
|[Fine-tune a DistilBERT Model for Multi Label Classification task](https://github.com/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb) | How to fine-tune a DistilBERT Model for Multi Label Classification task | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [](https://colab.research.google.com/github/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb)|
|[Fine-tune ALBERT for sentence-pair classification](https://github.com/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb) | How to fine-tune an ALBERT model or another BERT-based model for the sentence-pair classification task | [Nadir El Manouzi](https://github.com/NadirEM) | [](https://colab.research.google.com/github/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb)|
|[Fine-tune Roberta for sentiment analysis](https://github.com/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb) | How to fine-tune a Roberta model for sentiment analysis | [Dhaval Taunk](https://github.com/DhavalTaunk08) |[](https://colab.research.google.com/github/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb)|
|[Evaluating Question Generation Models](https://github.com/flexudy-pipe/qugeev) | How accurate are the answers to questions generated by your seq2seq transformer model? | [Pascal Zoleko](https://github.com/zolekode) | [](https://colab.research.google.com/drive/1bpsSqCQU-iw_5nNoRm_crPq6FRuJthq_?usp=sharing)|
|[Classify text with DistilBERT and Tensorflow](https://github.com/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb) | How to fine-tune DistilBERT for text classification in TensorFlow | [Peter Bayerle](https://github.com/peterbayerle) | [](https://colab.research.google.com/github/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb)|
|[Leverage BERT for Encoder-Decoder Summarization on CNN/Dailymail](https://github.com/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb) | How to warm-start a *EncoderDecoderModel* with a *bert-base-uncased* checkpoint for summarization on CNN/Dailymail | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb)|
|[Leverage RoBERTa for Encoder-Decoder Summarization on BBC XSum](https://github.com/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb) | How to warm-start a shared *EncoderDecoderModel* with a *roberta-base* checkpoint for summarization on BBC/XSum | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb)|
|[Fine-tune TAPAS on Sequential Question Answering (SQA)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb) | How to fine-tune *TapasForQuestionAnswering* with a *tapas-base* checkpoint on the Sequential Question Answering (SQA) dataset | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb)|
|[Evaluate TAPAS on Table Fact Checking (TabFact)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb) | How to evaluate a fine-tuned *TapasForSequenceClassification* with a *tapas-base-finetuned-tabfact* checkpoint using a combination of the 🤗 datasets and 🤗 transformers libraries | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb)|
|[Fine-tuning mBART for translation](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb) | How to fine-tune mBART using Seq2SeqTrainer for Hindi to English translation | [Vasudev Gupta](https://github.com/vasudevgupta7) | [](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb)|
|[Fine-tune LayoutLM on FUNSD (a form understanding dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb) | How to fine-tune *LayoutLMForTokenClassification* on the FUNSD dataset for information extraction from scanned documents | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb)|
|[Fine-Tune DistilGPT2 and Generate Text](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb) | How to fine-tune DistilGPT2 and generate text | [Aakash Tripathi](https://github.com/tripathiaakash) | [](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb)|
|[Fine-Tune LED on up to 8K tokens](https://github.com/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb) | How to fine-tune LED on pubmed for long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb)|
|[Evaluate LED on Arxiv](https://github.com/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb) | How to effectively evaluate LED on long-range summarization | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb)|
|[Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb) | How to fine-tune *LayoutLMForSequenceClassification* on the RVL-CDIP dataset for scanned document classification | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb)|
|[Wav2Vec2 CTC decoding with GPT2 adjustment](https://github.com/voidful/huggingface_notebook/blob/main/xlsr_gpt.ipynb) | How to decode CTC sequence with language model adjustment | [Eric Lam](https://github.com/voidful) | [](https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing)|
|[Fine-tune BART for summarization in two languages with Trainer class](https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb) | How to fine-tune BART for summarization in two languages with Trainer class | [Eliza Szczechla](https://github.com/elsanns) | [](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb)|
|[Evaluate Big Bird on Trivia QA](https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb) | How to evaluate BigBird on long document question answering on Trivia QA | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb)|
| [Create video captions using Wav2Vec2](https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | How to create YouTube captions from any video by transcribing the audio with Wav2Vec | [Niklas Muennighoff](https://github.com/Muennighoff) |[](https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) |
| [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | How to evaluate *LukeForEntityClassification* on the Open Entity dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) |
| [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | How to evaluate *LukeForEntityPairClassification* on the TACRED dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) |
| [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | How to evaluate *LukeForEntitySpanClassification* on the CoNLL-2003 dataset | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) |
| [Evaluate BigBird-Pegasus on PubMed dataset](https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | How to evaluate *BigBirdPegasusForConditionalGeneration* on PubMed dataset | [Vasudev Gupta](https://github.com/vasudevgupta7) | [](https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) |
@@ -14,21 +14,28 @@
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../src'))

sys.path.insert(0, os.path.abspath("../../src"))


# -- Project information -----------------------------------------------------

project = u'transformers'
copyright = u'2020, huggingface'
author = u'huggingface'
project = "transformers"
copyright = "2020, The Hugging Face Team, Licenced under the Apache License, Version 2.0"
author = "huggingface"

# The short X.Y version
version = u''
version = ""
# The full version, including alpha/beta/rc tags
release = u'3.5.0'
release = u'4.6.1'


# Prefix link to point to master, comment this during version release and uncomment below line
extlinks = {"prefix_link": ("https://github.com/huggingface/transformers/blob/master/%s", "")}
# Prefix link to always point to corresponding version, uncomment this during version release
# extlinks = {'prefix_link': ('https://github.com/huggingface/transformers/blob/v'+ release + '/%s', '')}

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
@@ -39,26 +46,28 @@ release = u'3.5.0'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'recommonmark',
    'sphinx.ext.viewcode',
    'sphinx_markdown_tables',
    'sphinx_copybutton'
    "sphinx.ext.autodoc",
    "sphinx.ext.extlinks",
    "sphinx.ext.coverage",
    "sphinx.ext.napoleon",
    "recommonmark",
    "sphinx.ext.viewcode",
    "sphinx_markdown_tables",
    "sphinxext.opengraph",
    "sphinx_copybutton",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'
master_doc = "index"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -70,7 +79,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
@@ -84,20 +93,30 @@ copybutton_prompt_is_regexp = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'analytics_id': 'UA-83738774-2'
}
html_theme_options = {"analytics_id": "UA-83738774-2", "navigation_with_keys": True}

# Configuration for OpenGraph and Twitter Card Tags.
# These are responsible for creating nice shareable social images https://ahrefs.com/blog/open-graph-meta-tags/
# https://ogp.me/#type_website
ogp_image = "https://huggingface.co/front/thumbnails/transformers.png"
ogp_description = "State-of-the-art Natural Language Processing for PyTorch and TensorFlow 2.0. Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation, etc in 100+ languages. Its aim is to make cutting-edge NLP easier to use for everyone"
ogp_description_length = 160

ogp_custom_meta_tags = [
    f'<meta name="twitter:image" content="{ogp_image}">',
    f'<meta name="twitter:description" content="{ogp_description}">',
]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
@@ -109,17 +128,17 @@ html_static_path = ['_static']
#
# html_sidebars = {}

# This must be the name of an image file (path relative to the configuration
# directory) that is the favicon of the docs. Modern browsers use this as
# the icon for tabs, windows and bookmarks. It should be a Windows-style
# icon file (.ico).
html_favicon = 'favicon.ico'
html_favicon = "favicon.ico"


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'transformersdoc'
htmlhelp_basename = "transformersdoc"


# -- Options for LaTeX output ------------------------------------------------
@@ -128,15 +147,12 @@ latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
@@ -146,8 +162,7 @@ latex_elements = {
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'transformers.tex', u'transformers Documentation',
     u'huggingface', 'manual'),
    (master_doc, "transformers.tex", "transformers Documentation", "huggingface", "manual"),
]


@@ -155,10 +170,7 @@ latex_documents = [

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'transformers', u'transformers Documentation',
     [author], 1)
]
man_pages = [(master_doc, "transformers", "transformers Documentation", [author], 1)]


# -- Options for Texinfo output ----------------------------------------------
@@ -167,9 +179,15 @@ man_pages = [
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'transformers', u'transformers Documentation',
     author, 'transformers', 'One line description of project.',
     'Miscellaneous'),
    (
        master_doc,
        "transformers",
        "transformers Documentation",
        author,
        "transformers",
        "One line description of project.",
        "Miscellaneous",
    ),
]


@@ -188,11 +206,13 @@ epub_title = project
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
epub_exclude_files = ["search.html"]


def setup(app):
    app.add_css_file('css/huggingface.css')
    app.add_css_file('css/code-snippets.css')
    app.add_js_file('js/custom.js')
    app.add_css_file("css/huggingface.css")
    app.add_css_file("css/code-snippets.css")
    app.add_js_file("js/custom.js")


# -- Extension configuration -------------------------------------------------

@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Converting Tensorflow Checkpoints
=======================================================================================================================

@@ -15,19 +27,14 @@ BERT

You can convert any TensorFlow checkpoint for BERT (in particular `the pre-trained models released by Google
<https://github.com/google-research/bert#pre-trained-models>`_\ ) in a PyTorch save file by using the
`convert_bert_original_tf_checkpoint_to_pytorch.py
<https://github.com/huggingface/transformers/blob/master/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py>`_
script.
:prefix_link:`convert_bert_original_tf_checkpoint_to_pytorch.py
<src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py>` script.

This CLI takes as input a TensorFlow checkpoint (three files starting with ``bert_model.ckpt``\ ) and the associated
configuration file (\ ``bert_config.json``\ ), and creates a PyTorch model for this configuration, loads the weights
from the TensorFlow checkpoint in the PyTorch model and saves the resulting model in a standard PyTorch save file that
can be imported using ``torch.load()`` (see examples in `run_bert_extract_features.py
<https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_extract_features.py>`_\ ,
`run_bert_classifier.py
<https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_classifier.py>`_ and
`run_bert_squad.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_squad.py>`_\
).
can be imported using ``from_pretrained()`` (see example in :doc:`quicktour`, :prefix_link:`run_glue.py
<examples/pytorch/text-classification/run_glue.py>`\ ).

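For instance, after the conversion below you could load the result with something like this (a sketch; the directory
is illustrative and must contain the converted ``pytorch_model.bin`` together with its configuration saved as
``config.json``):

.. code-block:: python

    from transformers import BertForPreTraining

    # Point this at the output directory of `transformers-cli convert`
    model = BertForPreTraining.from_pretrained("/path/to/converted/bert")
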
You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
checkpoint (the three files starting with ``bert_model.ckpt``\ ) but be sure to keep the configuration file (\
@@ -40,12 +47,12 @@ Here is an example of the conversion process for a pre-trained ``BERT-Base Uncas

.. code-block:: shell

    export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

    transformers-cli convert --model_type bert \
      --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
      --config $BERT_BASE_DIR/bert_config.json \
      --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here
<https://github.com/google-research/bert#pre-trained-models>`__.
@@ -54,9 +61,8 @@ ALBERT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Convert TensorFlow model checkpoints of ALBERT to PyTorch using the
`convert_albert_original_tf_checkpoint_to_pytorch.py
<https://github.com/huggingface/transformers/blob/master/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py>`_
script.
:prefix_link:`convert_albert_original_tf_checkpoint_to_pytorch.py
<src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py>` script.

The CLI takes as input a TensorFlow checkpoint (three files starting with ``model.ckpt-best``\ ) and the accompanying
configuration file (\ ``albert_config.json``\ ), then creates and saves a PyTorch model. To run this conversion you
@@ -66,12 +72,12 @@ Here is an example of the conversion process for the pre-trained ``ALBERT Base``

.. code-block:: shell

    export ALBERT_BASE_DIR=/path/to/albert/albert_base

    transformers-cli convert --model_type albert \
      --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
      --config $ALBERT_BASE_DIR/albert_config.json \
      --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here
<https://github.com/google-research/albert#pre-trained-models>`__.
@@ -85,13 +91,13 @@ save as the same format than OpenAI pretrained model (see `here <https://github.

.. code-block:: shell

    export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

    transformers-cli convert --model_type gpt \
      --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config OPENAI_GPT_CONFIG] \
      [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]

OpenAI GPT-2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -102,13 +108,13 @@ Here is an example of the conversion process for a pre-trained OpenAI GPT-2 mode

.. code-block:: shell

    export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights

    transformers-cli convert --model_type gpt2 \
      --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config OPENAI_GPT2_CONFIG] \
      [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]

Transformer-XL
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -118,13 +124,13 @@ Here is an example of the conversion process for a pre-trained Transformer-XL mo

.. code-block:: shell

    export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint

    transformers-cli convert --model_type transfo_xl \
      --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config TRANSFO_XL_CONFIG] \
      [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK]

XLNet
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -134,14 +140,14 @@ Here is an example of the conversion process for a pre-trained XLNet model:

.. code-block:: shell

    export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
    export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config

    transformers-cli convert --model_type xlnet \
      --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
      --config $TRANSFO_XL_CONFIG_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--finetuning_task_name XLNET_FINETUNED_TASK]

XLM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -151,10 +157,25 @@ Here is an example of the conversion process for a pre-trained XLM model:

.. code-block:: shell

    export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

    transformers-cli convert --model_type xlm \
      --tf_checkpoint $XLM_CHECKPOINT_PATH \
      --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
      [--config XLM_CONFIG] \
      [--finetuning_task_name XLM_FINETUNED_TASK]

T5
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained T5 model:

.. code-block:: shell

    export T5=/path/to/t5/uncased_L-12_H-768_A-12

    transformers-cli convert --model_type t5 \
      --tf_checkpoint $T5/t5_model.ckpt \
      --config $T5/t5_config.json \
      --pytorch_dump_output $T5/pytorch_model.bin

@@ -1,12 +1,24 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Fine-tuning with custom datasets
=======================================================================================================================

.. note::

    The datasets used in this tutorial are available and can be more easily accessed using the `🤗 NLP library
    <https://github.com/huggingface/nlp>`_. We do not use this library to access the datasets here since this tutorial
    meant to illustrate how to work with your own data. A brief of introduction can be found at the end of the tutorial
    in the section ":ref:`nlplib`".
    The datasets used in this tutorial are available and can be more easily accessed using the `🤗 Datasets library
    <https://github.com/huggingface/datasets>`_. We do not use this library to access the datasets here since this
    tutorial is meant to illustrate how to work with your own data. A brief introduction can be found at the end of the
    tutorial in the section ":ref:`datasetslib`".

This tutorial will take you through several examples of using 🤗 Transformers models with your own datasets. The guide
shows one of many valid workflows for using these models and is meant to be illustrative rather than definitive. We
@@ -29,7 +41,7 @@ Sequence Classification with IMDb Reviews
.. note::

    This dataset can be explored in the Hugging Face model hub (`IMDb <https://huggingface.co/datasets/imdb>`_), and
    can be alternatively downloaded with the 🤗 NLP library with ``load_dataset("imdb")``.
    can be alternatively downloaded with the 🤗 Datasets library with ``load_dataset("imdb")``.

In this example, we'll show how to download, tokenize, and train a model on the IMDb reviews dataset. This task takes
the text of a review and requires the model to predict whether the sentiment of the review is positive or negative.
@@ -63,7 +75,7 @@ read this in.
    test_texts, test_labels = read_imdb_split('aclImdb/test')

We now have a train and test dataset, but let's also create a validation set which we can use for evaluation
and tuning without training our test set results. Sklearn has a convenient utility for creating such splits:
and tuning without tainting our test set results. Sklearn has a convenient utility for creating such splits:

.. code-block:: python
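    # (The diff elides this block; reconstructed as a sketch from the
    # surrounding text, which names Sklearn and these exact variables.)
    from sklearn.model_selection import train_test_split

    train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)
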
@@ -248,7 +260,7 @@ Token Classification with W-NUT Emerging Entities
.. note::

    This dataset can be explored in the Hugging Face model hub (`WNUT-17 <https://huggingface.co/datasets/wnut_17>`_),
    and can be alternatively downloaded with the 🤗 NLP library with ``load_dataset("wnut_17")``.
    and can be alternatively downloaded with the 🤗 Datasets library with ``load_dataset("wnut_17")``.

Next we will look at token classification. Rather than classifying an entire sequence, this task classifies token by
token. We'll demonstrate how to do this with `Named Entity Recognition
@@ -447,7 +459,7 @@ Question Answering with SQuAD 2.0
.. note::

    This dataset can be explored in the Hugging Face model hub (`SQuAD V2
    <https://huggingface.co/datasets/squad_v2>`_), and can be alternatively downloaded with the 🤗 NLP library with
    <https://huggingface.co/datasets/squad_v2>`_), and can be alternatively downloaded with the 🤗 Datasets library with
    ``load_dataset("squad_v2")``.

Question answering comes in many forms. In this example, we'll look at the particular type of extractive QA that
@@ -547,11 +559,13 @@ we can use the built in :func:`~transformers.BatchEncoding.char_to_token` method
        for i in range(len(answers)):
            start_positions.append(encodings.char_to_token(i, answers[i]['answer_start']))
            end_positions.append(encodings.char_to_token(i, answers[i]['answer_end'] - 1))
            # if None, the answer passage has been truncated

            # if start position is None, the answer passage has been truncated
            if start_positions[-1] is None:
                start_positions[-1] = tokenizer.model_max_length
            if end_positions[-1] is None:
                end_positions[-1] = tokenizer.model_max_length

        encodings.update({'start_positions': start_positions, 'end_positions': end_positions})

    add_token_positions(train_encodings, train_answers)
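For reference, the enclosing helper that the hunk above modifies presumably looks like this (a sketch assembled from
the fragment itself; ``tokenizer`` is the tokenizer created earlier in the tutorial):

.. code-block:: python

    def add_token_positions(encodings, answers):
        start_positions = []
        end_positions = []
        for i in range(len(answers)):
            start_positions.append(encodings.char_to_token(i, answers[i]['answer_start']))
            end_positions.append(encodings.char_to_token(i, answers[i]['answer_end'] - 1))

            # if start position is None, the answer passage has been truncated
            if start_positions[-1] is None:
                start_positions[-1] = tokenizer.model_max_length
            if end_positions[-1] is None:
                end_positions[-1] = tokenizer.model_max_length

        encodings.update({'start_positions': start_positions, 'end_positions': end_positions})
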
@@ -663,22 +677,23 @@ Additional Resources
- :doc:`Preprocessing <preprocessing>`. Docs page on data preprocessing.
- :doc:`Training <training>`. Docs page on training and fine-tuning.

.. _nlplib:
.. _datasetslib:

Using the 🤗 NLP Datasets & Metrics library
Using the 🤗 Datasets & Metrics library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This tutorial demonstrates how to read in datasets from various raw text formats and prepare them for training with 🤗
Transformers so that you can do the same thing with your own custom datasets. However, we recommend users use the `🤗
NLP library <https://github.com/huggingface/nlp>`_ for working with the 150+ datasets included in the `hub
Datasets library <https://github.com/huggingface/datasets>`_ for working with the 150+ datasets included in the `hub
<https://huggingface.co/datasets>`_, including the three datasets used in this tutorial. As a very brief overview, we
will show how to use the NLP library to download and prepare the IMDb dataset from the first example, :ref:`seq_imdb`.
will show how to use the Datasets library to download and prepare the IMDb dataset from the first example,
:ref:`seq_imdb`.

Start by downloading the dataset:

.. code-block:: python

    from nlp import load_dataset
    from datasets import load_dataset
    train = load_dataset("imdb", split="train")

Each dataset has multiple columns corresponding to different features. Let's see what our columns are.
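A quick way to inspect them (a sketch; ``column_names`` is a standard attribute of a 🤗 Datasets ``Dataset``):

.. code-block:: python

    print(train.column_names)  # e.g. ['text', 'label']
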
@@ -710,5 +725,5 @@ dataset elements.
    >>> {key: val.shape for key, val in train[0].items()}
    {'labels': TensorShape([]), 'input_ids': TensorShape([512]), 'attention_mask': TensorShape([512])}

We now have a fully-prepared dataset. Check out `the 🤗 NLP docs <https://huggingface.co/nlp/processing.html>`_ for a
more thorough introduction.
We now have a fully-prepared dataset. Check out `the 🤗 Datasets docs
<https://huggingface.co/docs/datasets/processing.html>`_ for a more thorough introduction.

295  docs/source/debugging.rst  Normal file
@@ -0,0 +1,295 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.


Debugging
=======================================================================================================================

Underflow and Overflow Detection
-----------------------------------------------------------------------------------------------------------------------

.. note::

    This feature is currently available for PyTorch only.

.. note::

    This feature can be used with any ``nn.Module``-based model.

If you start getting ``loss=NaN`` or the model exhibits some other abnormal behavior due to ``inf`` or ``nan`` in
activations or weights, you need to discover where the first underflow or overflow happens and what led to it. Luckily
you can accomplish that easily by activating a special module that will do the detection automatically.

If you're using :class:`~transformers.Trainer`, you just need to add:

.. code-block:: bash

    --debug underflow_overflow

to the normal command line arguments, or pass ``debug="underflow_overflow"`` when creating the
:class:`~transformers.TrainingArguments` object.
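For example (a minimal sketch; the output directory is arbitrary):

.. code-block:: python

    from transformers import TrainingArguments

    training_args = TrainingArguments(output_dir="output", debug="underflow_overflow")
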
If you're using your own training loop or another Trainer you can accomplish the same with:

.. code-block:: python

    from transformers.debug_utils import DebugUnderflowOverflow

    debug_overflow = DebugUnderflowOverflow(model)

:class:`~transformers.debug_utils.DebugUnderflowOverflow` inserts hooks into the model that immediately after each
forward call will test input and output variables and also the corresponding module's weights. As soon as ``inf`` or
``nan`` is detected in at least one element of the activations or weights, the program will assert and print a report
like this (this was caught with ``google/mt5-small`` under fp16 mixed precision):

.. code-block::

    Detected inf/nan during batch_number=0
    Last 21 forward frames:
    abs min  abs max  metadata
                      encoder.block.1.layer.1.DenseReluDense.dropout Dropout
    0.00e+00 2.57e+02 input[0]
    0.00e+00 2.85e+02 output
    [...]
                      encoder.block.2.layer.0 T5LayerSelfAttention
    6.78e-04 3.15e+03 input[0]
    2.65e-04 3.42e+03 output[0]
                 None output[1]
    2.25e-01 1.00e+04 output[2]
                      encoder.block.2.layer.1.layer_norm T5LayerNorm
    8.69e-02 4.18e-01 weight
    2.65e-04 3.42e+03 input[0]
    1.79e-06 4.65e+00 output
                      encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
    2.17e-07 4.50e+00 weight
    1.79e-06 4.65e+00 input[0]
    2.68e-06 3.70e+01 output
                      encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
    8.08e-07 2.66e+01 weight
    1.79e-06 4.65e+00 input[0]
    1.27e-04 2.37e+02 output
                      encoder.block.2.layer.1.DenseReluDense.dropout Dropout
    0.00e+00 8.76e+03 input[0]
    0.00e+00 9.74e+03 output
                      encoder.block.2.layer.1.DenseReluDense.wo Linear
    1.01e-06 6.44e+00 weight
    0.00e+00 9.74e+03 input[0]
    3.18e-04 6.27e+04 output
                      encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
    1.79e-06 4.65e+00 input[0]
    3.18e-04 6.27e+04 output
                      encoder.block.2.layer.1.dropout Dropout
    3.18e-04 6.27e+04 input[0]
    0.00e+00      inf output

The example output has been trimmed in the middle for brevity.

The second column shows the value of the absolute largest element, so if you have a closer look at the last few frames,
the inputs and outputs were in the range of ``1e4``. So when this training was done under fp16 mixed precision the very
last step overflowed (since under ``fp16`` the largest number before ``inf`` is ``64e3``). To avoid overflows under
``fp16`` the activations must remain way below ``1e4``, because ``1e4 * 1e4 = 1e8`` so any matrix multiplication with
large activations is going to lead to a numerical overflow condition.

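You can see the ``fp16`` ceiling directly (a standalone sketch, independent of the report above):

.. code-block:: python

    import torch

    print(torch.finfo(torch.float16).max)  # 65504.0
    x = torch.tensor(40000.0, dtype=torch.float16)
    print(x * 2)  # tensor(inf, dtype=torch.float16)
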
At the very start of the trace you can discover at which batch number the problem occurred (here ``Detected inf/nan
during batch_number=0`` means the problem occurred on the first batch).

Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting
for. If we look just at this frame:

.. code-block::

                      encoder.block.2.layer.1.layer_norm T5LayerNorm
    8.69e-02 4.18e-01 weight
    2.65e-04 3.42e+03 input[0]
    1.79e-06 4.65e+00 output

Here, ``encoder.block.2.layer.1.layer_norm`` indicates that it was a layer norm for the first layer of the second
block of the encoder, and that the specific ``forward`` call was on ``T5LayerNorm``.

Let's look at the last few frames of that report:

.. code-block::

    Detected inf/nan during batch_number=0
    Last 21 forward frames:
    abs min  abs max  metadata
    [...]
                      encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
    2.17e-07 4.50e+00 weight
    1.79e-06 4.65e+00 input[0]
    2.68e-06 3.70e+01 output
                      encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
    8.08e-07 2.66e+01 weight
    1.79e-06 4.65e+00 input[0]
    1.27e-04 2.37e+02 output
                      encoder.block.2.layer.1.DenseReluDense.wo Linear
    1.01e-06 6.44e+00 weight
    0.00e+00 9.74e+03 input[0]
    3.18e-04 6.27e+04 output
                      encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
    1.79e-06 4.65e+00 input[0]
    3.18e-04 6.27e+04 output
                      encoder.block.2.layer.1.dropout Dropout
    3.18e-04 6.27e+04 input[0]
    0.00e+00      inf output

The last frame reports for the ``Dropout.forward`` function, with the first entry for the only input and the second for
the only output. You can see that it was called from the ``dropout`` attribute inside the ``DenseReluDense`` class. We
can see that it happened during the first layer of the 2nd block, during the very first batch. Finally, the absolute
largest input element was ``6.27e+04``, while the largest output element was ``inf``.

You can see here that ``T5DenseGatedGeluDense.forward`` resulted in output activations whose absolute max value was
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have ``Dropout``, which renormalizes
the weights after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an
overflow (``inf``).

As you can see, it's the previous frames that we need to look into when the numbers start getting very large for fp16.

Let's match the report to the code from ``models/t5/modeling_t5.py``:

.. code-block:: python

    class T5DenseGatedGeluDense(nn.Module):
        def __init__(self, config):
            super().__init__()
            self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
            self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
            self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
            self.dropout = nn.Dropout(config.dropout_rate)
            self.gelu_act = ACT2FN["gelu_new"]

        def forward(self, hidden_states):
            hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
            hidden_linear = self.wi_1(hidden_states)
            hidden_states = hidden_gelu * hidden_linear
            hidden_states = self.dropout(hidden_states)
            hidden_states = self.wo(hidden_states)
            return hidden_states

Now it's easy to see the ``dropout`` call, and all the previous calls as well.

Since the detection is happening in a forward hook, these reports are printed immediately after each ``forward``
returns.

Going back to the full report, to act on it and to fix the problem, we need to go a few frames up where the numbers
started to go up and most likely switch to the ``fp32`` mode here, so that the numbers don't overflow when multiplied
or summed up. Of course, there might be other solutions. For example, we could turn off ``amp`` temporarily if it's
enabled, after moving the original ``forward`` into a helper wrapper, like so:

.. code-block:: python

    import torch


    def _forward(self, hidden_states):
        hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


    def forward(self, hidden_states):
        if torch.is_autocast_enabled():
            with torch.cuda.amp.autocast(enabled=False):
                return self._forward(hidden_states)
        else:
            return self._forward(hidden_states)

Since the automatic detector only reports on inputs and outputs of full frames, once you know where to look, you may
want to analyse the intermediary stages of any specific ``forward`` function as well. In such a case you can use the
``detect_overflow`` helper function to inject the detector where you want it, for example:

.. code-block:: python

    from transformers.debug_utils import detect_overflow


    class T5LayerFF(nn.Module):
        [...]

        def forward(self, hidden_states):
            forwarded_states = self.layer_norm(hidden_states)
            detect_overflow(forwarded_states, "after layer_norm")
            forwarded_states = self.DenseReluDense(forwarded_states)
            detect_overflow(forwarded_states, "after DenseReluDense")
            return hidden_states + self.dropout(forwarded_states)

You can see that we added 2 of these, and now we track whether ``inf`` or ``nan`` for ``forwarded_states`` was detected
somewhere in between.

Actually, the detector already reports these because each of the calls in the example above is an ``nn.Module``, but
if you had some local direct calculations, this is how you'd track them.

Additionally, if you're instantiating the debugger in your own code, you can adjust the number of frames printed from
its default, e.g.:

.. code-block:: python

    from transformers.debug_utils import DebugUnderflowOverflow

    debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)

Specific batch absolute mix and max value tracing
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The same debugging class can be used for per-batch tracing with the underflow/overflow detection feature turned off.
|
||||
|
||||
Let's say you want to watch the absolute min and max values for all the ingredients of each ``forward`` call of a given
|
||||
batch, and only do that for batches 1 and 3. Then you instantiate this class as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])
|
||||
|
||||
And now full batches 1 and 3 will be traced using the same format as the underflow/overflow detector does.

Batches are 0-indexed.

This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward
right to that area. Here is a sample truncated output for such a configuration:

.. code-block::

                      *** Starting batch number=1 ***
    abs min  abs max  metadata
                      shared Embedding
    1.01e-06 7.92e+02 weight
    0.00e+00 2.47e+04 input[0]
    5.36e-05 7.92e+02 output
    [...]
                      decoder.dropout Dropout
    1.60e-07 2.27e+01 input[0]
    0.00e+00 2.52e+01 output
                      decoder T5Stack
         not a tensor output
                      lm_head Linear
    1.01e-06 7.92e+02 weight
    0.00e+00 1.11e+00 input[0]
    6.06e-02 8.39e+01 output
                      T5ForConditionalGeneration
         not a tensor output

                      *** Starting batch number=3 ***
    abs min  abs max  metadata
                      shared Embedding
    1.01e-06 7.92e+02 weight
    0.00e+00 2.78e+04 input[0]
    5.36e-05 7.92e+02 output
    [...]

Here you will get a huge number of frames dumped - as many as there were forward calls in your model - so it may or may
not be what you want, but sometimes it can be easier to use for debugging purposes than a normal debugger. For example,
if a problem starts happening at batch number 150, you can dump traces for batches 149 and 150 and compare where the
numbers started to diverge.

You can also specify the batch number after which to stop the training, with:

.. code-block:: python

    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
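
As a minimal sketch of how this slots into a training script (the ``model``, ``dataloader`` and ``optimizer`` names
below are hypothetical placeholders, not part of the debugger's API), instantiating the class is all that's needed,
since it registers forward hooks on the model:

.. code-block:: python

    from transformers.debug_utils import DebugUnderflowOverflow

    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)

    for step, batch in enumerate(dataloader):
        outputs = model(**batch)  # traced batches are reported from inside the forward hooks
        outputs.loss.backward()
        optimizer.step()
        optimizer.zero_grad()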
docs/source/fast_tokenizers.rst (new file)
@ -0,0 +1,62 @@

Using tokenizers from 🤗 Tokenizers
=======================================================================================================================

The :class:`~transformers.PreTrainedTokenizerFast` depends on the `tokenizers
<https://huggingface.co/docs/tokenizers>`__ library. The tokenizers obtained from the 🤗 Tokenizers library can be
loaded very simply into 🤗 Transformers.

Before getting into the specifics, let's first start by creating a dummy tokenizer in a few lines:

.. code-block::

    >>> from tokenizers import Tokenizer
    >>> from tokenizers.models import BPE
    >>> from tokenizers.trainers import BpeTrainer
    >>> from tokenizers.pre_tokenizers import Whitespace

    >>> tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
    >>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])

    >>> tokenizer.pre_tokenizer = Whitespace()
    >>> files = [...]
    >>> tokenizer.train(files, trainer)

We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to
a JSON file for future re-use.

Loading directly from the tokenizer object
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Let's see how to leverage this tokenizer object in the 🤗 Transformers library. The
:class:`~transformers.PreTrainedTokenizerFast` class allows for easy instantiation, by accepting the instantiated
`tokenizer` object as an argument:

.. code-block::

    >>> from transformers import PreTrainedTokenizerFast

    >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)

This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to :doc:`the tokenizer
page <main_classes/tokenizer>` for more information.
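
For example, as a quick sanity check (the sentence below is arbitrary), the wrapped object supports the usual
``__call__`` API:

.. code-block::

    >>> encoded = fast_tokenizer("Hello, world!")
    >>> print(encoded.input_ids)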

Loading from a JSON file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In order to load a tokenizer from a JSON file, let's first start by saving our tokenizer:

.. code-block::

    >>> tokenizer.save("tokenizer.json")

The path to which we saved this file can be passed to the :class:`~transformers.PreTrainedTokenizerFast` initialization
method using the :obj:`tokenizer_file` parameter:

.. code-block::

    >>> from transformers import PreTrainedTokenizerFast

    >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")

This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to :doc:`the tokenizer
page <main_classes/tokenizer>` for more information.
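
Either way, once wrapped, the tokenizer can also be persisted and reloaded in the standard 🤗 Transformers format; a
minimal sketch (the directory name is arbitrary):

.. code-block::

    >>> fast_tokenizer.save_pretrained("my-tokenizer")
    >>> reloaded_tokenizer = PreTrainedTokenizerFast.from_pretrained("my-tokenizer")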
@ -1,3 +1,15 @@

..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Glossary
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@ -9,22 +21,25 @@ General terms

- CLM: causal language modeling, a pretraining task where the model reads the texts in order and has to predict the
  next word. It's usually done by reading the whole sentence but using a mask inside the model to hide the future
  tokens at a certain timestep.
- deep learning: machine learning algorithms which use neural networks with several layers.
- MLM: masked language modeling, a pretraining task where the model sees a corrupted version of the texts, usually done
  by masking some tokens randomly, and has to predict the original text.
- multimodal: a task that combines texts with another kind of inputs (for instance images).
- NLG: natural language generation, all tasks related to generating text (for instance talk with transformers,
  translation).
- NLP: natural language processing, a generic way to say "deal with texts".
- NLU: natural language understanding, all tasks related to understanding what is in a text (for instance classifying
  the whole text, individual words).
- pretrained model: a model that has been pretrained on some data (for instance all of Wikipedia). Pretraining methods
  involve a self-supervised objective, which can be reading the text and trying to predict the next word (see CLM) or
  masking some words and trying to predict them (see MLM).
- RNN: recurrent neural network, a type of model that uses a loop over a layer to process texts.
- self-attention: each element of the input finds out which other elements of the input they should attend to.
- seq2seq or sequence-to-sequence: models that generate a new sequence from an input, like translation models, or
  summarization models (such as :doc:`Bart </model_doc/bart>` or :doc:`T5 </model_doc/t5>`).
- token: a part of a sentence, usually a word, but can also be a subword (non-common words are often split in subwords)
  or a punctuation symbol.
- transformer: self-attention based deep learning model architecture.

Model inputs
-----------------------------------------------------------------------------------------------------------------------
@ -167,7 +182,7 @@ such:

.. code-block::

    >>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]

We can use our tokenizer to automatically generate such a sentence by passing the two sequences to ``tokenizer`` as two
arguments (and not a list, like before), like this:
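
A minimal sketch of that call (assuming a BERT-style checkpoint such as ``bert-base-cased``; the example sentences are
arbitrary):

.. code-block::

    >>> from transformers import BertTokenizer
    >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
    >>> sequence_a = "HuggingFace is based in NYC"
    >>> sequence_b = "Where is HuggingFace based?"

    >>> encoded_dict = tokenizer(sequence_a, sequence_b)
    >>> print(tokenizer.decode(encoded_dict["input_ids"]))
    [CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]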
@ -214,7 +229,7 @@ Contrary to RNNs that have the position of each token embedded within them, tran

each token. Therefore, the position IDs (``position_ids``) are used by the model to identify each token's position in
the list of tokens.

They are an optional parameter. If no ``position_ids`` are passed to the model, the IDs are automatically created as
absolute positional embeddings.
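
For instance, here is a sketch of what those default IDs look like for a single sequence (models build the equivalent
of this internally; ``seq_length`` is an arbitrary example value):

.. code-block::

    >>> import torch
    >>> seq_length = 6
    >>> position_ids = torch.arange(seq_length).unsqueeze(0)
    >>> position_ids
    tensor([[0, 1, 2, 3, 4, 5]])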

Absolute positional embeddings are selected in the range ``[0, config.max_position_embeddings - 1]``. Some models use

docs/source/imgs/transformers_overview.png (new binary file, 691 KiB; not shown)
@ -1,12 +1,12 @@

Transformers
=======================================================================================================================

State-of-the-art Natural Language Processing for Jax, Pytorch and TensorFlow

🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides general-purpose
architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural
Language Generation (NLG) with 32+ pretrained models in 100+ languages and deep interoperability between Jax,
PyTorch and TensorFlow.

This is the documentation of our repository `transformers <https://github.com/huggingface/transformers>`_.
@ -22,6 +22,18 @@ State-of-the-art NLP for everyone:

- Hands-on practitioners
- AI/ML/NLP teachers and educators

..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Lower compute costs, smaller carbon footprint:

- Researchers can share trained models instead of always retraining
@ -31,10 +43,20 @@ Lower compute costs, smaller carbon footprint:

Choose the right framework for every part of a model's lifetime:

- Train state-of-the-art models in 3 lines of code
- Deep interoperability between Jax, Pytorch and TensorFlow models
- Move a single model between Jax/PyTorch/TensorFlow frameworks at will
- Seamlessly pick the right framework for training, evaluation, production

The support for Jax is still experimental (with a few models right now); expect to see it grow in the coming months!

`All the model checkpoints <https://huggingface.co/models>`__ are seamlessly integrated from the huggingface.co `model
hub <https://huggingface.co>`__ where they are uploaded directly by `users <https://huggingface.co/users>`__ and
`organizations <https://huggingface.co/organizations>`__.

Current number of checkpoints: |checkpoints|

.. |checkpoints| image:: https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen

Contents
-----------------------------------------------------------------------------------------------------------------------
@ -44,7 +66,7 @@ The documentation is organized in five parts:

  and a glossary.
- **USING 🤗 TRANSFORMERS** contains general tutorials on how to use the library.
- **ADVANCED GUIDES** contains more advanced guides that are more specific to a given script or part of the library.
- **RESEARCH** focuses on tutorials that have less to do with how to use the library but more about general research in
  transformers models.
- The last three sections contain the documentation of each public class and function, grouped in:

@ -52,7 +74,7 @@ The documentation is organized in five parts:

- **MODELS** for the classes and functions related to each model implemented in the library.
- **INTERNAL HELPERS** for the classes and functions we use internally.

The library currently contains Jax, PyTorch and Tensorflow implementations, pretrained model weights, usage scripts and
conversion utilities for the following models:

..
@ -66,105 +88,300 @@ conversion utilities for the following models:

   Pre-training for Natural Language Generation, Translation, and Comprehension
   <https://arxiv.org/pdf/1910.13461.pdf>`__ by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman
   Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
3. :doc:`BARThez <model_doc/barthez>` (from École polytechnique) released with the paper `BARThez: a Skilled Pretrained
   French Sequence-to-Sequence Model <https://arxiv.org/abs/2010.12321>`__ by Moussa Kamal Eddine, Antoine J.-P.
   Tixier, Michalis Vazirgiannis.
4. :doc:`BERT <model_doc/bert>` (from Google) released with the paper `BERT: Pre-training of Deep Bidirectional
   Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`__ by Jacob Devlin, Ming-Wei Chang,
   Kenton Lee and Kristina Toutanova.
5. :doc:`BERT For Sequence Generation <model_doc/bertgeneration>` (from Google) released with the paper `Leveraging
   Pre-trained Checkpoints for Sequence Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi
   Narayan, Aliaksei Severyn.
6. :doc:`BigBird-RoBERTa <model_doc/bigbird>` (from Google Research) released with the paper `Big Bird: Transformers
   for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua
   Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
7. :doc:`BigBird-Pegasus <model_doc/bigbird_pegasus>` (from Google Research) released with the paper `Big Bird:
   Transformers for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by Manzil Zaheer, Guru Guruganesh, Avinava
   Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
8. :doc:`Blenderbot <model_doc/blenderbot>` (from Facebook) released with the paper `Recipes for building an
   open-domain chatbot <https://arxiv.org/abs/2004.13637>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary
   Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
9. :doc:`BlenderbotSmall <model_doc/blenderbot_small>` (from Facebook) released with the paper `Recipes for building an
   open-domain chatbot <https://arxiv.org/abs/2004.13637>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary
   Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
10. :doc:`BORT <model_doc/bort>` (from Alexa) released with the paper `Optimal Subarchitecture Extraction For BERT
    <https://arxiv.org/abs/2010.10499>`__ by Adrian de Wynter and Daniel J. Perry.
11. :doc:`CamemBERT <model_doc/camembert>` (from Inria/Facebook/Sorbonne) released with the paper `CamemBERT: a Tasty
    French Language Model <https://arxiv.org/abs/1911.03894>`__ by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz
    Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
12. :doc:`CLIP <model_doc/clip>` (from OpenAI) released with the paper `Learning Transferable Visual Models From
    Natural Language Supervision <https://arxiv.org/abs/2103.00020>`__ by Alec Radford, Jong Wook Kim, Chris Hallacy,
    Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen
    Krueger, Ilya Sutskever.
13. :doc:`ConvBERT <model_doc/convbert>` (from YituTech) released with the paper `ConvBERT: Improving BERT with
    Span-based Dynamic Convolution <https://arxiv.org/abs/2008.02496>`__ by Zihang Jiang, Weihao Yu, Daquan Zhou,
    Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
14. :doc:`CPM <model_doc/cpm>` (from Tsinghua University) released with the paper `CPM: A Large-scale Generative
    Chinese Pre-trained Language Model <https://arxiv.org/abs/2012.00413>`__ by Zhengyan Zhang, Xu Han, Hao Zhou, Pei
    Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng,
    Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang,
    Juanzi Li, Xiaoyan Zhu, Maosong Sun.
15. :doc:`CTRL <model_doc/ctrl>` (from Salesforce) released with the paper `CTRL: A Conditional Transformer Language
    Model for Controllable Generation <https://arxiv.org/abs/1909.05858>`__ by Nitish Shirish Keskar*, Bryan McCann*,
    Lav R. Varshney, Caiming Xiong and Richard Socher.
16. :doc:`DeBERTa <model_doc/deberta>` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT with
    Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu
    Chen.
17. :doc:`DeBERTa-v2 <model_doc/deberta_v2>` (from Microsoft) released with the paper `DeBERTa: Decoding-enhanced BERT
    with Disentangled Attention <https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao,
    Weizhu Chen.
18. :doc:`DeiT <model_doc/deit>` (from Facebook) released with the paper `Training data-efficient image transformers &
    distillation through attention <https://arxiv.org/abs/2012.12877>`__ by Hugo Touvron, Matthieu Cord, Matthijs
    Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
19. :doc:`DialoGPT <model_doc/dialogpt>` (from Microsoft Research) released with the paper `DialoGPT: Large-Scale
    Generative Pre-training for Conversational Response Generation <https://arxiv.org/abs/1911.00536>`__ by Yizhe
    Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
20. :doc:`DistilBERT <model_doc/distilbert>` (from HuggingFace), released together with the paper `DistilBERT, a
    distilled version of BERT: smaller, faster, cheaper and lighter <https://arxiv.org/abs/1910.01108>`__ by Victor
    Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into `DistilGPT2
    <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__, RoBERTa into `DistilRoBERTa
    <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__, Multilingual BERT into
    `DistilmBERT <https://github.com/huggingface/transformers/tree/master/examples/distillation>`__ and a German
    version of DistilBERT.
21. :doc:`DPR <model_doc/dpr>` (from Facebook) released with the paper `Dense Passage Retrieval for Open-Domain
    Question Answering <https://arxiv.org/abs/2004.04906>`__ by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick
    Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
22. :doc:`ELECTRA <model_doc/electra>` (from Google Research/Stanford University) released with the paper `ELECTRA:
    Pre-training text encoders as discriminators rather than generators <https://arxiv.org/abs/2003.10555>`__ by Kevin
    Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
23. :doc:`FlauBERT <model_doc/flaubert>` (from CNRS) released with the paper `FlauBERT: Unsupervised Language Model
    Pre-training for French <https://arxiv.org/abs/1912.05372>`__ by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne,
    Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
24. :doc:`Funnel Transformer <model_doc/funnel>` (from CMU/Google Brain) released with the paper `Funnel-Transformer:
    Filtering out Sequential Redundancy for Efficient Language Processing <https://arxiv.org/abs/2006.03236>`__ by
    Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
25. :doc:`GPT <model_doc/gpt>` (from OpenAI) released with the paper `Improving Language Understanding by Generative
    Pre-Training <https://blog.openai.com/language-unsupervised/>`__ by Alec Radford, Karthik Narasimhan, Tim Salimans
    and Ilya Sutskever.
26. :doc:`GPT-2 <model_doc/gpt2>` (from OpenAI) released with the paper `Language Models are Unsupervised Multitask
    Learners <https://blog.openai.com/better-language-models/>`__ by Alec Radford*, Jeffrey Wu*, Rewon Child, David
    Luan, Dario Amodei** and Ilya Sutskever**.
27. :doc:`GPT Neo <model_doc/gpt_neo>` (from EleutherAI) released in the repository `EleutherAI/gpt-neo
    <https://github.com/EleutherAI/gpt-neo>`__ by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
28. :doc:`I-BERT <model_doc/ibert>` (from Berkeley) released with the paper `I-BERT: Integer-only BERT Quantization
    <https://arxiv.org/abs/2101.01321>`__ by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
29. :doc:`LayoutLM <model_doc/layoutlm>` (from Microsoft Research Asia) released with the paper `LayoutLM: Pre-training
    of Text and Layout for Document Image Understanding <https://arxiv.org/abs/1912.13318>`__ by Yiheng Xu, Minghao Li,
    Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
30. :doc:`LED <model_doc/led>` (from AllenAI) released with the paper `Longformer: The Long-Document Transformer
    <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
31. :doc:`Longformer <model_doc/longformer>` (from AllenAI) released with the paper `Longformer: The Long-Document
    Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, Arman Cohan.
32. :doc:`LUKE <model_doc/luke>` (from Studio Ousia) released with the paper `LUKE: Deep Contextualized Entity
    Representations with Entity-aware Self-attention <https://arxiv.org/abs/2010.01057>`__ by Ikuya Yamada, Akari Asai,
    Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
33. :doc:`LXMERT <model_doc/lxmert>` (from UNC Chapel Hill) released with the paper `LXMERT: Learning Cross-Modality
    Encoder Representations from Transformers for Open-Domain Question Answering <https://arxiv.org/abs/1908.07490>`__
    by Hao Tan and Mohit Bansal.
34. :doc:`M2M100 <model_doc/m2m_100>` (from Facebook) released with the paper `Beyond English-Centric Multilingual
    Machine Translation <https://arxiv.org/abs/2010.11125>`__ by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi
    Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman
    Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
35. :doc:`MarianMT <model_doc/marian>` Machine translation models trained using `OPUS <http://opus.nlpl.eu/>`__ data by
    Jörg Tiedemann. The `Marian Framework <https://marian-nmt.github.io/>`__ is being developed by the Microsoft
    Translator Team.
36. :doc:`MBart <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Denoising Pre-training for
    Neural Machine Translation <https://arxiv.org/abs/2001.08210>`__ by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li,
    Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
37. :doc:`MBart-50 <model_doc/mbart>` (from Facebook) released with the paper `Multilingual Translation with Extensible
    Multilingual Pretraining and Finetuning <https://arxiv.org/abs/2008.00401>`__ by Yuqing Tang, Chau Tran, Xian Li,
    Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
38. :doc:`Megatron-BERT <model_doc/megatron_bert>` (from NVIDIA) released with the paper `Megatron-LM: Training
    Multi-Billion Parameter Language Models Using Model Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad
    Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
39. :doc:`Megatron-GPT2 <model_doc/megatron_gpt2>` (from NVIDIA) released with the paper `Megatron-LM: Training
    Multi-Billion Parameter Language Models Using Model Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad
    Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
40. :doc:`MPNet <model_doc/mpnet>` (from Microsoft Research) released with the paper `MPNet: Masked and Permuted
    Pre-training for Language Understanding <https://arxiv.org/abs/2004.09297>`__ by Kaitao Song, Xu Tan, Tao Qin,
    Jianfeng Lu, Tie-Yan Liu.
41. :doc:`MT5 <model_doc/mt5>` (from Google AI) released with the paper `mT5: A massively multilingual pre-trained
    text-to-text transformer <https://arxiv.org/abs/2010.11934>`__ by Linting Xue, Noah Constant, Adam Roberts, Mihir
    Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
42. :doc:`Pegasus <model_doc/pegasus>` (from Google) released with the paper `PEGASUS: Pre-training with Extracted
    Gap-sentences for Abstractive Summarization <https://arxiv.org/abs/1912.08777>`__ by Jingqing Zhang, Yao Zhao,
    Mohammad Saleh and Peter J. Liu.
43. :doc:`ProphetNet <model_doc/prophetnet>` (from Microsoft Research) released with the paper `ProphetNet: Predicting
    Future N-gram for Sequence-to-Sequence Pre-training <https://arxiv.org/abs/2001.04063>`__ by Yu Yan, Weizhen Qi,
    Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
44. :doc:`Reformer <model_doc/reformer>` (from Google Research) released with the paper `Reformer: The Efficient
    Transformer <https://arxiv.org/abs/2001.04451>`__ by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
45. :doc:`RoBERTa <model_doc/roberta>` (from Facebook), released together with the paper `RoBERTa: A Robustly Optimized
    BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`__ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du,
    Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
46. :doc:`SpeechToTextTransformer <model_doc/speech_to_text>` (from Facebook), released together with the paper
    `fairseq S2T: Fast Speech-to-Text Modeling with fairseq <https://arxiv.org/abs/2010.05171>`__ by Changhan Wang, Yun
    Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
47. :doc:`SqueezeBert <model_doc/squeezebert>` released with the paper `SqueezeBERT: What can computer vision teach NLP
    about efficient neural networks? <https://arxiv.org/abs/2006.11316>`__ by Forrest N. Iandola, Albert E. Shaw, Ravi
    Krishna, and Kurt W. Keutzer.
48. :doc:`T5 <model_doc/t5>` (from Google AI) released with the paper `Exploring the Limits of Transfer Learning with a
    Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel and Noam Shazeer and Adam
    Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
49. :doc:`TAPAS <model_doc/tapas>` (from Google AI) released with the paper `TAPAS: Weakly Supervised Table Parsing via
    Pre-training <https://arxiv.org/abs/2004.02349>`__ by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller,
    Francesco Piccinno and Julian Martin Eisenschlos.
50. :doc:`Transformer-XL <model_doc/transformerxl>` (from Google/CMU) released with the paper `Transformer-XL:
    Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`__ by Zihang Dai*,
    Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
51. :doc:`Vision Transformer (ViT) <model_doc/vit>` (from Google AI) released with the paper `An Image is Worth 16x16
    Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`__ by Alexey Dosovitskiy,
    Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias
    Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
52. :doc:`Wav2Vec2 <model_doc/wav2vec2>` (from Facebook AI) released with the paper `wav2vec 2.0: A Framework for
    Self-Supervised Learning of Speech Representations <https://arxiv.org/abs/2006.11477>`__ by Alexei Baevski, Henry
    Zhou, Abdelrahman Mohamed, Michael Auli.
53. :doc:`XLM <model_doc/xlm>` (from Facebook) released together with the paper `Cross-lingual Language Model
    Pretraining <https://arxiv.org/abs/1901.07291>`__ by Guillaume Lample and Alexis Conneau.
54. :doc:`XLM-ProphetNet <model_doc/xlmprophetnet>` (from Microsoft Research) released with the paper `ProphetNet:
    Predicting Future N-gram for Sequence-to-Sequence Pre-training <https://arxiv.org/abs/2001.04063>`__ by Yu Yan,
    Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
55. :doc:`XLM-RoBERTa <model_doc/xlmroberta>` (from Facebook AI), released together with the paper `Unsupervised
    Cross-lingual Representation Learning at Scale <https://arxiv.org/abs/1911.02116>`__ by Alexis Conneau*, Kartikay
    Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke
    Zettlemoyer and Veselin Stoyanov.
56. :doc:`XLNet <model_doc/xlnet>` (from Google/CMU) released with the paper `XLNet: Generalized Autoregressive
    Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`__ by Zhilin Yang*, Zihang Dai*, Yiming
    Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
57. :doc:`XLSR-Wav2Vec2 <model_doc/xlsr_wav2vec2>` (from Facebook AI) released with the paper `Unsupervised
    Cross-Lingual Representation Learning For Speech Recognition <https://arxiv.org/abs/2006.13979>`__ by Alexis
    Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.

.. _bigtable:

The table below represents the current support in the library for each of those models: whether they have a Python
tokenizer (called "slow"), a "fast" tokenizer backed by the 🤗 Tokenizers library, and whether they have support in Jax
(via Flax), PyTorch, and/or TensorFlow.

..
    This table is updated automatically from the auto modules with `make fix-copies`. Do not update manually!

.. rst-class:: center-aligned-table

+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Model                       | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
+=============================+================+================+=================+====================+==============+
| ALBERT                      |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BART                        |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BERT                        |       ✅       |       ✅       |        ✅       |         ✅         |      ✅      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Bert Generation             |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BigBird                     |       ✅       |       ✅       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BigBirdPegasus              |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Blenderbot                  |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| BlenderbotSmall             |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| CLIP                        |       ✅       |       ✅       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| CTRL                        |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| CamemBERT                   |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ConvBERT                    |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DPR                         |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DeBERTa                     |       ✅       |       ✅       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DeBERTa-v2                  |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DeiT                        |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| DistilBERT                  |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ELECTRA                     |       ✅       |       ✅       |        ✅       |         ✅         |      ✅      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Encoder decoder             |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| FairSeq Machine-Translation |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| FlauBERT                    |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Funnel Transformer          |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| GPT Neo                     |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| I-BERT                      |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LED                         |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LUKE                        |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LXMERT                      |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| LayoutLM                    |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Longformer                  |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| M2M100                      |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| MPNet                       |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Marian                      |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| MegatronBert                |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| MobileBERT                  |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| OpenAI GPT                  |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| OpenAI GPT-2                |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Pegasus                     |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ProphetNet                  |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| RAG                         |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Reformer                    |       ✅       |       ✅       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| RetriBERT                   |       ✅       |       ✅       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| RoBERTa                     |       ✅       |       ✅       |        ✅       |         ✅         |      ✅      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Speech2Text                 |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| SqueezeBERT                 |       ✅       |       ✅       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| T5                          |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| TAPAS                       |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Transformer-XL              |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| ViT                         |       ❌       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| Wav2Vec2                    |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLM                         |       ✅       |       ❌       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLM-RoBERTa                 |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLMProphetNet               |       ✅       |       ❌       |        ✅       |         ❌         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| XLNet                       |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| mBART                       |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+
| mT5                         |       ✅       |       ✅       |        ✅       |         ✅         |      ❌      |
+-----------------------------+----------------+----------------+-----------------+--------------------+--------------+

.. toctree::
    :maxdepth: 2

@ -193,12 +410,18 @@ conversion utilities for the following models:

    pretrained_models
    examples
    troubleshooting
    custom_datasets
    notebooks
    sagemaker
    community
    converting_tensorflow_models
    migration
    contributing
    add_new_model
    fast_tokenizers
    testing
    debugging
    serialization
.. toctree::

@ -215,6 +438,7 @@ conversion utilities for the following models:

    main_classes/callback
    main_classes/configuration
    main_classes/data_collator
    main_classes/logging
    main_classes/model
    main_classes/optimizer_schedules

@ -223,6 +447,7 @@ conversion utilities for the following models:

    main_classes/processors
    main_classes/tokenizer
    main_classes/trainer
    main_classes/feature_extractor

.. toctree::
    :maxdepth: 2

@ -231,12 +456,24 @@ conversion utilities for the following models:

    model_doc/albert
    model_doc/auto
    model_doc/bart
    model_doc/barthez
    model_doc/bert
    model_doc/bertweet
    model_doc/bertgeneration
    model_doc/bert_japanese
    model_doc/bigbird
    model_doc/bigbird_pegasus
    model_doc/blenderbot
    model_doc/blenderbot_small
    model_doc/bort
    model_doc/camembert
    model_doc/clip
    model_doc/convbert
    model_doc/cpm
    model_doc/ctrl
    model_doc/deberta
    model_doc/deberta_v2
    model_doc/deit
    model_doc/dialogpt
    model_doc/distilbert
    model_doc/dpr
@ -245,28 +482,43 @@ conversion utilities for the following models:

    model_doc/flaubert
    model_doc/fsmt
    model_doc/funnel
    model_doc/herbert
    model_doc/ibert
    model_doc/layoutlm
    model_doc/led
    model_doc/longformer
    model_doc/luke
    model_doc/lxmert
    model_doc/marian
    model_doc/m2m_100
    model_doc/mbart
    model_doc/megatron_bert
    model_doc/megatron_gpt2
    model_doc/mobilebert
    model_doc/mpnet
    model_doc/mt5
    model_doc/gpt
    model_doc/gpt2
    model_doc/gpt_neo
    model_doc/pegasus
    model_doc/phobert
    model_doc/prophetnet
    model_doc/rag
    model_doc/reformer
    model_doc/retribert
    model_doc/roberta
    model_doc/speech_to_text
    model_doc/squeezebert
    model_doc/t5
    model_doc/tapas
    model_doc/transformerxl
    model_doc/vit
    model_doc/wav2vec2
    model_doc/xlm
    model_doc/xlmprophetnet
    model_doc/xlmroberta
    model_doc/xlnet
    model_doc/xlsr_wav2vec2

.. toctree::
    :maxdepth: 2

@ -277,3 +529,4 @@ conversion utilities for the following models:

    internal/tokenization_utils
    internal/trainer_utils
    internal/generation_utils
    internal/file_utils
@ -1,9 +1,25 @@

<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Installation

🤗 Transformers is tested on Python 3.6+, and PyTorch 1.1.0+ or TensorFlow 2.0+.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're
unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going
to use and activate it.

Now, if you want to use 🤗 Transformers, you can install it with pip. If you'd like to play with the examples, you
@ -12,9 +28,10 @@ must install it from source.

## Installation with pip

First you need to install one of, or both, TensorFlow 2.0 and PyTorch.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2.0-rc-is-available),
[PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or
[Flax installation page](https://github.com/google/flax#quick-install)
regarding the specific install command for your platform.

When TensorFlow 2.0 and/or PyTorch have been installed, 🤗 Transformers can be installed using pip as follows:
@ -34,6 +51,12 @@ or 🤗 Transformers and TensorFlow 2.0 in one line with:

```bash
pip install transformers[tf-cpu]
```

or 🤗 Transformers and Flax in one line with:

```bash
pip install transformers[flax]
```

To check 🤗 Transformers is properly installed, run the following command:

```bash
@ -50,15 +73,17 @@ It should download a pretrained model then print something like

## Installing from source

Here is how to quickly install `transformers` from source:

```bash
pip install git+https://github.com/huggingface/transformers
```

Note that this will not install the latest released version, but the bleeding-edge `master` version, which you may want
to use if a bug has been fixed since the last official release and a new release hasn't yet been rolled out.

While we strive to keep `master` operational at all times, if you notice some issues, they usually get fixed within a
few hours or a day, and you're more than welcome to help us detect any problems by opening an
[Issue](https://github.com/huggingface/transformers/issues); this way, things will get fixed even sooner.

Again, you can run:

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I hate you'))"
```

@ -66,37 +91,96 @@ python -c "from transformers import pipeline; print(pipeline('sentiment-analysis

to check 🤗 Transformers is properly installed.

## Editable install

If you want to constantly use the bleeding-edge `master` version of the source code, or if you want to contribute to the library and need to test the changes in the code you're making, you will need an editable install. This is done by cloning the repository and installing with the following commands:

```bash
git clone https://github.com/huggingface/transformers.git
cd transformers
pip install -e .
```

This command links the folder you cloned the repository to with your python library paths, so python will look inside this folder in addition to the normal library-wide paths. So if normally your python packages get installed into:

```
~/anaconda3/envs/main/lib/python3.7/site-packages/
```

now this editable install will reside where you cloned the folder to, e.g. `~/transformers/`, and python will search it too.

Do note that you have to keep that `transformers` folder around and not delete it to continue using the `transformers` library.

Now, let's get to the real benefit of this installation approach. Say you saw that some new feature has just been committed to `master`. If you have already performed all the steps above, to update your transformers to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:

```
cd ~/transformers/
git pull
```

There is nothing else to do. Your python environment will find the bleeding-edge version of `transformers` on the next run.

## With conda

Since Transformers version v4.0.0, we now have a conda channel: `huggingface`.

🤗 Transformers can be installed using conda as follows:

```
conda install -c huggingface transformers
```

Follow the installation pages of TensorFlow, PyTorch or Flax to see how to install them with conda.

## Caching models
|
||||
|
||||
This library provides pretrained models that will be downloaded and cached locally. Unless you specify a location with
|
||||
`cache_dir=...` when you use methods like `from_pretrained`, these models will automatically be downloaded in the
|
||||
folder given by the shell environment variable ``TRANSFORMERS_CACHE``. The default value for it will be the PyTorch
|
||||
cache home followed by ``/transformers/`` (even if you don't have PyTorch installed). This is (by order of priority):
|
||||
folder given by the shell environment variable ``TRANSFORMERS_CACHE``. The default value for it will be the Hugging
|
||||
Face cache home followed by ``/transformers/``. This is (by order of priority):
|
||||
|
||||
* shell environment variable ``TORCH_HOME``
|
||||
* shell environment variable ``XDG_CACHE_HOME`` + ``/torch/``
|
||||
* default: ``~/.cache/torch/``
|
||||
* shell environment variable ``HF_HOME``
|
||||
* shell environment variable ``XDG_CACHE_HOME`` + ``/huggingface/``
|
||||
* default: ``~/.cache/huggingface/``
|
||||
|
||||
So if you don't have any specific environment variable set, the cache directory will be at
|
||||
``~/.cache/torch/transformers/``.
|
||||
``~/.cache/huggingface/transformers/``.
|
||||
|
||||
**Note:** If you have set a shell environment variable for one of the predecessors of this library
|
||||
(``PYTORCH_TRANSFORMERS_CACHE`` or ``PYTORCH_PRETRAINED_BERT_CACHE``), those will be used if there is no shell
|
||||
environment variable for ``TRANSFORMERS_CACHE``.
|
||||
|
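
For example, here is a minimal sketch of overriding the cache location for a single download; the cache path is a placeholder:

```python
from transformers import AutoModel

# Weights are downloaded to (or reused from) the given directory
# instead of the TRANSFORMERS_CACHE default.
model = AutoModel.from_pretrained("bert-base-cased", cache_dir="/path/to/my/cache")
```
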
### Offline mode

It's possible to run 🤗 Transformers in a firewalled or no-network environment.

Setting the environment variable `TRANSFORMERS_OFFLINE=1` will tell 🤗 Transformers to use local files only and not try to look things up.

Most likely you may want to couple this with `HF_DATASETS_OFFLINE=1`, which performs the same for 🤗 Datasets if you're using the latter.

Here is an example of how this can be used on a filesystem that is shared between a normally networked instance and one firewalled to the external world.

On the instance with normal network access, run your program, which will download and cache models (and optionally datasets if you use 🤗 Datasets). For example:

```bash
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
```

and then, with the same filesystem, you can run the same program on the firewalled instance:

```bash
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
```

and it should succeed without hanging while waiting to timeout.
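
If you prefer not to set environment variables, the same can be sketched per call with the existing `local_files_only` argument of `from_pretrained`:

```python
from transformers import AutoModel

# Only looks in the local cache; raises an error instead of reaching the network.
model = AutoModel.from_pretrained("t5-small", local_files_only=True)
```
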
If you expect to be downloading large volumes of models (more than 1,000) from our hosted bucket (for instance through your CI setup, or a large-scale production deployment), please cache the model files on your end. It will be way faster, and cheaper. Feel free to contact us privately if you need any help.
## Do you want to run a Transformer model on a mobile device?

You should check out our [swift-coreml-transformers](https://github.com/huggingface/swift-coreml-transformers) repo.

It contains a set of tools to convert PyTorch or TensorFlow 2.0 trained Transformer models (currently `GPT-2`, `DistilGPT-2`, `BERT`, and `DistilBERT`) to CoreML models that run on iOS devices.

At some point in the future, you'll be able to seamlessly move from pretraining or fine-tuning models in PyTorch or TensorFlow 2.0 to productizing them in CoreML, or prototype a model or an app in CoreML then research its hyperparameters or architecture from PyTorch or TensorFlow 2.0. Super exciting!
docs/source/internal/file_utils.rst (new file, 54 lines)

@@ -0,0 +1,54 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

General Utilities
-----------------------------------------------------------------------------------------------------------------------

This page lists all of Transformers' general utility functions that are found in the file ``file_utils.py``.

Most of those are only useful if you are studying the general code in the library.


Enums and namedtuples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils.ExplicitEnum

.. autoclass:: transformers.file_utils.PaddingStrategy

.. autoclass:: transformers.file_utils.TensorType


Special Decorators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: transformers.file_utils.add_start_docstrings

.. autofunction:: transformers.file_utils.add_start_docstrings_to_model_forward

.. autofunction:: transformers.file_utils.add_end_docstrings

.. autofunction:: transformers.file_utils.add_code_sample_docstrings

.. autofunction:: transformers.file_utils.replace_return_docstrings


Special Properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils.cached_property


Other Utilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils._BaseLazyModule
@@ -1,12 +1,114 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Utilities for Generation
-----------------------------------------------------------------------------------------------------------------------

This page lists all the utility functions used by :meth:`~transformers.PreTrainedModel.generate`,
:meth:`~transformers.PreTrainedModel.greedy_search`, :meth:`~transformers.PreTrainedModel.sample`,
:meth:`~transformers.PreTrainedModel.beam_search`, :meth:`~transformers.PreTrainedModel.beam_sample`, and
:meth:`~transformers.PreTrainedModel.group_beam_search`.
Most of those are only useful if you are studying the code of the generate methods in the library.

Generate Outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The output of :meth:`~transformers.PreTrainedModel.generate` is an instance of a subclass of
:class:`~transformers.file_utils.ModelOutput`. This output is a data structure containing all the information returned
by :meth:`~transformers.PreTrainedModel.generate`, but that can also be used as a tuple or a dictionary.

Here's an example:

.. code-block::

    from transformers import GPT2Tokenizer, GPT2LMHeadModel

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')

    inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
    generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)

The ``generation_output`` object is a :class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`; as we can
see in the documentation of that class below, this means it has the following attributes:

- ``sequences``: the generated sequences of tokens
- ``scores`` (optional): the prediction scores of the language modelling head, for each generation step
- ``hidden_states`` (optional): the hidden states of the model, for each generation step
- ``attentions`` (optional): the attention weights of the model, for each generation step

Here we have the ``scores`` since we passed along ``output_scores=True``, but we don't have ``hidden_states`` and
``attentions`` because we didn't pass ``output_hidden_states=True`` or ``output_attentions=True``.

You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you
will get ``None``. Here, for instance, ``generation_output.scores`` are all the generated prediction scores of the
language modeling head, and ``generation_output.attentions`` is ``None``.

When using our ``generation_output`` object as a tuple, it only keeps the attributes that don't have ``None`` values.
Here, for instance, it has two elements, ``sequences`` then ``scores``, so

.. code-block::

    generation_output[:2]

will return the tuple ``(generation_output.sequences, generation_output.scores)``.

When using our ``generation_output`` object as a dictionary, it only keeps the attributes that don't have ``None``
values. Here, for instance, it has two keys, ``sequences`` and ``scores``.

We document here all output types.
GreedySearchOutput
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.generation_utils.GreedySearchDecoderOnlyOutput
    :members:

.. autoclass:: transformers.generation_utils.GreedySearchEncoderDecoderOutput
    :members:


SampleOutput
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.generation_utils.SampleDecoderOnlyOutput
    :members:

.. autoclass:: transformers.generation_utils.SampleEncoderDecoderOutput
    :members:


BeamSearchOutput
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.generation_utils.BeamSearchDecoderOnlyOutput
    :members:

.. autoclass:: transformers.generation_utils.BeamSearchEncoderDecoderOutput
    :members:


BeamSampleOutput
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.generation_utils.BeamSampleDecoderOnlyOutput
    :members:

.. autoclass:: transformers.generation_utils.BeamSampleEncoderDecoderOutput
    :members:


LogitsProcessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -19,6 +121,9 @@ generation.
.. autoclass:: transformers.LogitsProcessorList
    :members: __call__

.. autoclass:: transformers.LogitsWarper
    :members: __call__

.. autoclass:: transformers.MinLengthLogitsProcessor
    :members: __call__

@@ -40,6 +145,39 @@ generation.
.. autoclass:: transformers.NoBadWordsLogitsProcessor
    :members: __call__

.. autoclass:: transformers.PrefixConstrainedLogitsProcessor
    :members: __call__

.. autoclass:: transformers.HammingDiversityLogitsProcessor
    :members: __call__

.. autoclass:: transformers.ForcedBOSTokenLogitsProcessor
    :members: __call__

.. autoclass:: transformers.ForcedEOSTokenLogitsProcessor
    :members: __call__

.. autoclass:: transformers.InfNanRemoveLogitsProcessor
    :members: __call__
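
As a minimal sketch of how these classes compose (the token id and the limits below are illustrative), a
:class:`~transformers.LogitsProcessorList` chains several processors and applies them in order at each generation
step:

.. code-block:: python

    from transformers import LogitsProcessorList, MinLengthLogitsProcessor, NoRepeatNGramLogitsProcessor

    # Forbid the EOS token (id 50256 for GPT-2) before 10 tokens have been generated,
    # and block the repetition of any 2-gram.
    logits_processor = LogitsProcessorList([
        MinLengthLogitsProcessor(10, eos_token_id=50256),
        NoRepeatNGramLogitsProcessor(2),
    ])

Such a list can then be passed to the generation methods above via their ``logits_processor`` argument.
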
StoppingCriteria
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A :class:`~transformers.StoppingCriteria` can be used to change when to stop generation (other than an EOS token).

.. autoclass:: transformers.StoppingCriteria
    :members: __call__

.. autoclass:: transformers.StoppingCriteriaList
    :members: __call__

.. autoclass:: transformers.MaxLengthCriteria
    :members: __call__

.. autoclass:: transformers.MaxTimeCriteria
    :members: __call__
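
For instance, here is a minimal sketch combining the two built-in criteria (the limits are arbitrary):

.. code-block:: python

    from transformers import StoppingCriteriaList, MaxLengthCriteria, MaxTimeCriteria

    # Stop once sequences reach 50 tokens, or after 10 seconds, whichever comes first.
    stopping_criteria = StoppingCriteriaList([
        MaxLengthCriteria(max_length=50),
        MaxTimeCriteria(max_time=10.0),
    ])
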
BeamSearch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -48,3 +186,10 @@ BeamSearch
.. autoclass:: transformers.BeamSearchScorer
    :members: process, finalize

Utilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: transformers.top_k_top_p_filtering

.. autofunction:: transformers.tf_top_k_top_p_filtering
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Custom Layers and Utilities
-----------------------------------------------------------------------------------------------------------------------

@@ -79,8 +91,6 @@ TensorFlow loss functions
TensorFlow Helper Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: transformers.modeling_tf_utils.get_initializer

.. autofunction:: transformers.modeling_tf_utils.keras_serializable
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Utilities for pipelines
-----------------------------------------------------------------------------------------------------------------------

@@ -35,6 +47,4 @@ Data format
Utilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: transformers.pipelines.get_framework

.. autoclass:: transformers.pipelines.PipelineException
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Utilities for Tokenizers
-----------------------------------------------------------------------------------------------------------------------

@@ -26,12 +38,6 @@ SpecialTokensMixin
Enums and namedtuples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.tokenization_utils_base.TruncationStrategy

.. autoclass:: transformers.tokenization_utils_base.CharSpan
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Utilities for Trainer
-----------------------------------------------------------------------------------------------------------------------

@@ -10,6 +22,8 @@ Utilities
.. autoclass:: transformers.EvalPrediction

.. autoclass:: transformers.IntervalStrategy

.. autofunction:: transformers.set_seed

.. autofunction:: transformers.torch_distributed_zero_first

@@ -20,8 +34,21 @@ Callbacks internals
.. autoclass:: transformers.trainer_callback.CallbackHandler


Distributed Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.trainer_pt_utils.DistributedTensorGatherer
    :members:


HfArgumentParser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.HfArgumentParser


Debug Utilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.debug_utils.DebugUnderflowOverflow
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Callbacks
-----------------------------------------------------------------------------------------------------------------------

@@ -44,6 +56,8 @@ Here is the list of the available :class:`~transformers.TrainerCallback` in the
.. autoclass:: transformers.ProgressCallback

.. autoclass:: transformers.EarlyStoppingCallback

.. autoclass:: transformers.integrations.TensorBoardCallback

.. autoclass:: transformers.integrations.WandbCallback

@@ -60,6 +74,32 @@ TrainerCallback
.. autoclass:: transformers.TrainerCallback
    :members:

Here is an example of how to register a custom callback with the PyTorch :class:`~transformers.Trainer`:

.. code-block:: python

    class MyCallback(TrainerCallback):
        "A callback that prints a message at the beginning of training"

        def on_train_begin(self, args, state, control, **kwargs):
            print("Starting training")

    trainer = Trainer(
        model,
        args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        callbacks=[MyCallback],  # We can either pass the callback class this way or an instance of it (MyCallback())
    )

Another way to register a callback is to call ``trainer.add_callback()`` as follows:

.. code-block:: python

    trainer = Trainer(...)
    trainer.add_callback(MyCallback)
    # Alternatively, we can pass an instance of the callback class
    trainer.add_callback(MyCallback())

TrainerState
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Configuration
-----------------------------------------------------------------------------------------------------------------------


docs/source/main_classes/data_collator.rst (new file, 71 lines)

@@ -0,0 +1,71 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Data Collator
-----------------------------------------------------------------------------------------------------------------------

Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of
the same type as the elements of :obj:`train_dataset` or :obj:`eval_dataset`.

To be able to build batches, data collators may apply some processing (like padding). Some of them (like
:class:`~transformers.DataCollatorForLanguageModeling`) also apply some random data augmentation (like random masking)
on the formed batch.

Examples of use can be found in the :doc:`example scripts <../examples>` or :doc:`example notebooks <../notebooks>`.
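
As a minimal sketch of the general pattern (any tokenizer checkpoint would do here), a collator turns a list of
tokenized examples into a single padded batch:

.. code-block:: python

    from transformers import AutoTokenizer, DataCollatorWithPadding

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    features = [tokenizer("Hello world"), tokenizer("A somewhat longer example sentence")]
    batch = data_collator(features)  # a dict of tensors, padded to the longest example
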
Default data collator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: transformers.data.data_collator.default_data_collator


DataCollatorWithPadding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.data.data_collator.DataCollatorWithPadding
    :members:


DataCollatorForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.data.data_collator.DataCollatorForTokenClassification
    :members:


DataCollatorForSeq2Seq
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.data.data_collator.DataCollatorForSeq2Seq
    :members:


DataCollatorForLanguageModeling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.data.data_collator.DataCollatorForLanguageModeling
    :members: mask_tokens


DataCollatorForWholeWordMask
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.data.data_collator.DataCollatorForWholeWordMask
    :members: mask_tokens


DataCollatorForPermutationLanguageModeling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.data.data_collator.DataCollatorForPermutationLanguageModeling
    :members: mask_tokens
docs/source/main_classes/feature_extractor.rst (new file, 48 lines)

@@ -0,0 +1,48 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.


Feature Extractor
-----------------------------------------------------------------------------------------------------------------------

A feature extractor is in charge of preparing input features for a multi-modal model. This includes feature extraction
from sequences, *e.g.*, pre-processing audio files to Log-Mel Spectrogram features, and feature extraction from images,
*e.g.*, cropping image files, but also padding, normalization, and conversion to NumPy, PyTorch, and TensorFlow
tensors.
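
As a rough sketch of the workflow (the checkpoint name and the fake audio are placeholders), assuming the checkpoint
ships a feature extractor configuration:

.. code-block:: python

    from transformers import Wav2Vec2FeatureExtractor

    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

    raw_audio = [[0.0] * 16000]  # one second of silence at 16kHz, standing in for real audio
    inputs = feature_extractor(raw_audio, sampling_rate=16000, padding=True, return_tensors="pt")
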
FeatureExtractionMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.feature_extraction_utils.FeatureExtractionMixin
    :members: from_pretrained, save_pretrained


SequenceFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.SequenceFeatureExtractor
    :members: pad


BatchFeature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BatchFeature
    :members:


ImageFeatureExtractionMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.image_utils.ImageFeatureExtractionMixin
    :members:
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Logging
-----------------------------------------------------------------------------------------------------------------------

@@ -53,6 +65,10 @@ Other functions
.. autofunction:: transformers.logging.get_logger

.. autofunction:: transformers.logging.enable_default_handler

.. autofunction:: transformers.logging.disable_default_handler

.. autofunction:: transformers.logging.enable_explicit_format

.. autofunction:: transformers.logging.reset_format
@@ -1,9 +1,22 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Models
-----------------------------------------------------------------------------------------------------------------------

The base classes :class:`~transformers.PreTrainedModel`, :class:`~transformers.TFPreTrainedModel`, and
:class:`~transformers.FlaxPreTrainedModel` implement the common methods for loading/saving a model either from a local
file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace's AWS
S3 repository).
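
For instance, here is a minimal sketch of that loading/saving round trip (the local path is a placeholder):

.. code-block:: python

    from transformers import BertModel

    model = BertModel.from_pretrained("bert-base-cased")  # from the hosted pretrained configurations
    model.save_pretrained("./my-bert")                    # save to a local directory
    reloaded = BertModel.from_pretrained("./my-bert")     # reload from that directory
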
:class:`~transformers.PreTrainedModel` and :class:`~transformers.TFPreTrainedModel` also implement a few methods which
are common among all the models to:

@@ -45,6 +58,13 @@ TFModelUtilsMixin
    :members:


FlaxPreTrainedModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxPreTrainedModel
    :members:


Generation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -53,3 +73,10 @@ Generation
.. autoclass:: transformers.generation_tf_utils.TFGenerationMixin
    :members:


Pushing to the Hub
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils.PushToHubMixin
    :members:
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Optimization
-----------------------------------------------------------------------------------------------------------------------

@@ -31,6 +43,10 @@ Schedules
Learning Rate Schedules (Pytorch)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.SchedulerType

.. autofunction:: transformers.get_scheduler

.. autofunction:: transformers.get_constant_schedule

@@ -62,6 +78,10 @@ Learning Rate Schedules (Pytorch)
.. image:: /imgs/warmup_linear_schedule.png
    :target: /imgs/warmup_linear_schedule.png
    :alt:

.. autofunction:: transformers.get_polynomial_decay_schedule_with_warmup


Warmup (TensorFlow)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1,8 +1,20 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Model outputs
-----------------------------------------------------------------------------------------------------------------------

All models have outputs that are instances of subclasses of :class:`~transformers.file_utils.ModelOutput`. Those are
data structures containing all the information returned by the model, but that can also be used as tuples or
dictionaries.

Let's see how this looks in an example:

@@ -48,7 +60,7 @@ ModelOutput
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.file_utils.ModelOutput
    :members: to_tuple


BaseModelOutput
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -114,13 +126,6 @@ CausalLMOutputWithCrossAttentions
    :members:


CausalLMOutputWithPast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Pipelines
-----------------------------------------------------------------------------------------------------------------------

@@ -11,9 +23,11 @@ There are two categories of pipeline abstractions to be aware about:
- The :func:`~transformers.pipeline` which is the most powerful object encapsulating all other pipelines.
- The other task-specific pipelines:

  - :class:`~transformers.AutomaticSpeechRecognitionPipeline`
  - :class:`~transformers.ConversationalPipeline`
  - :class:`~transformers.FeatureExtractionPipeline`
  - :class:`~transformers.FillMaskPipeline`
  - :class:`~transformers.ImageClassificationPipeline`
  - :class:`~transformers.QuestionAnsweringPipeline`
  - :class:`~transformers.SummarizationPipeline`
  - :class:`~transformers.TextClassificationPipeline`
@@ -22,6 +36,7 @@ There are two categories of pipeline abstractions to be aware about:
  - :class:`~transformers.TranslationPipeline`
  - :class:`~transformers.ZeroShotClassificationPipeline`
  - :class:`~transformers.Text2TextGenerationPipeline`
  - :class:`~transformers.TableQuestionAnsweringPipeline`

The pipeline abstraction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -35,6 +50,13 @@ pipeline but requires an additional argument which is the `task`.
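
For instance, here is a minimal sketch of the abstraction; model selection is left to the library's defaults:

.. code-block:: python

    from transformers import pipeline

    classifier = pipeline(task="sentiment-analysis")
    classifier("We are very happy to show you the 🤗 Transformers library.")
    # => [{'label': 'POSITIVE', 'score': 0.9998...}]
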
The task specific pipelines
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

AutomaticSpeechRecognitionPipeline
=======================================================================================================================

.. autoclass:: transformers.AutomaticSpeechRecognitionPipeline
    :special-members: __call__
    :members:

ConversationalPipeline
=======================================================================================================================

@@ -58,11 +80,19 @@ FillMaskPipeline
    :special-members: __call__
    :members:

ImageClassificationPipeline
=======================================================================================================================

.. autoclass:: transformers.ImageClassificationPipeline
    :special-members: __call__
    :members:

NerPipeline
=======================================================================================================================

.. autoclass:: transformers.NerPipeline

See :class:`~transformers.TokenClassificationPipeline` for all details.

QuestionAnsweringPipeline
=======================================================================================================================

@@ -78,6 +108,13 @@ SummarizationPipeline
    :special-members: __call__
    :members:

TableQuestionAnsweringPipeline
=======================================================================================================================

.. autoclass:: transformers.TableQuestionAnsweringPipeline
    :special-members: __call__

TextClassificationPipeline
=======================================================================================================================

@@ -106,6 +143,13 @@ TokenClassificationPipeline
    :special-members: __call__
    :members:

TranslationPipeline
=======================================================================================================================

.. autoclass:: transformers.TranslationPipeline
    :special-members: __call__
    :members:

ZeroShotClassificationPipeline
=======================================================================================================================
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Processors
-----------------------------------------------------------------------------------------------------------------------

@@ -56,8 +68,8 @@ Additionally, the following method can be used to load values from a data file a
Example usage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

An example using these processors is given in the :prefix_link:`run_glue.py
<examples/legacy/text-classification/run_glue.py>` script.


XNLI
@@ -77,8 +89,8 @@ This library hosts the processor to load the XNLI data:

Please note that since the gold labels are available on the test set, evaluation is performed on the test set.

An example using these processors is given in the :prefix_link:`run_xnli.py
<examples/legacy/text-classification/run_xnli.py>` script.


SQuAD
@@ -156,5 +168,5 @@ Using `tensorflow_datasets` is as easy as using a data file:
    )


Another example using these processors is given in the :prefix_link:`run_squad.py
<examples/legacy/question-answering/run_squad.py>` script.
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Tokenizer
-----------------------------------------------------------------------------------------------------------------------

@@ -42,15 +54,24 @@ PreTrainedTokenizer
.. autoclass:: transformers.PreTrainedTokenizer
    :special-members: __call__
    :members: batch_decode, convert_ids_to_tokens, convert_tokens_to_ids, convert_tokens_to_string, decode, encode,
        get_added_vocab, get_special_tokens_mask, num_special_tokens_to_add, prepare_for_tokenization, tokenize,
        vocab_size


PreTrainedTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :class:`~transformers.PreTrainedTokenizerFast` depends on the `tokenizers
<https://huggingface.co/docs/tokenizers>`__ library. The tokenizers obtained from the 🤗 tokenizers library can be
loaded very simply into 🤗 transformers. Take a look at the :doc:`Using tokenizers from 🤗 tokenizers
<../fast_tokenizers>` page to understand how this is done.
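
For example, here is a minimal sketch of wrapping a tokenizer built with 🤗 tokenizers; the file name is a placeholder
for a serialized tokenizer:

.. code-block:: python

    from transformers import PreTrainedTokenizerFast

    # "tokenizer.json" is assumed to have been saved by the 🤗 tokenizers library.
    fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
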
.. autoclass:: transformers.PreTrainedTokenizerFast
    :special-members: __call__
    :members: batch_decode, convert_ids_to_tokens, convert_tokens_to_ids, convert_tokens_to_string, decode, encode,
        get_added_vocab, get_special_tokens_mask, num_special_tokens_to_add, set_truncation_and_padding, tokenize,
        vocab_size


BatchEncoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(File diff suppressed because it is too large.)

@@ -1,5 +1,186 @@
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Migrating from previous packages

## Migrating from transformers `v3.x` to `v4.x`

A couple of changes were introduced when the switch from version 3 to version 4 was done. Below is a summary of the expected changes:
#### 1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default.

The python and rust tokenizers have roughly the same API, but the rust tokenizers have a more complete feature set.

This introduces two breaking changes:
- The handling of overflowing tokens between the python and rust tokenizers is different.
- The rust tokenizers do not accept integers in the encoding methods.

##### How to obtain the same behavior as v3.x in v4.x

- The pipelines now contain additional features out of the box. See the [token-classification pipeline with the `grouped_entities` flag](https://huggingface.co/transformers/main_classes/pipelines.html?highlight=textclassification#tokenclassificationpipeline).
- The auto-tokenizers now return rust tokenizers. In order to obtain the python tokenizers instead, the user may use the `use_fast` flag by setting it to `False`:

In version `v3.x`:
```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
```
to obtain the same in version `v4.x`:
```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False)
```
#### 2. SentencePiece is removed from the required dependencies

The requirement on the SentencePiece dependency has been lifted from the `setup.py`. This is done so that we may have a channel on anaconda cloud without relying on `conda-forge`. This means that the tokenizers that depend on the SentencePiece library will not be available with a standard `transformers` installation.

This includes the **slow** versions of:
- `XLNetTokenizer`
- `AlbertTokenizer`
- `CamembertTokenizer`
- `MBartTokenizer`
- `PegasusTokenizer`
- `T5Tokenizer`
- `ReformerTokenizer`
- `XLMRobertaTokenizer`

##### How to obtain the same behavior as v3.x in v4.x

In order to obtain the same behavior as version `v3.x`, you should install `sentencepiece` additionally:

In version `v3.x`:
```bash
pip install transformers
```
to obtain the same in version `v4.x`:
```bash
pip install transformers[sentencepiece]
```
or
```bash
pip install transformers sentencepiece
```
#### 3. The architecture of the repo has been updated so that each model resides in its folder

The past and foreseeable addition of new models means that the number of files in the directory `src/transformers` keeps growing and becomes harder to navigate and understand. We made the choice to put each model and the files accompanying it in their own sub-directories.

This is a breaking change, as importing intermediary layers directly from a model's module needs to be done via a different path.

##### How to obtain the same behavior as v3.x in v4.x

In order to obtain the same behavior as version `v3.x`, you should update the path used to access the layers.

In version `v3.x`:
```py
from transformers.modeling_bert import BertLayer
```
to obtain the same in version `v4.x`:
```py
from transformers.models.bert.modeling_bert import BertLayer
```
#### 4. Switching the `return_dict` argument to `True` by default

The [`return_dict` argument](https://huggingface.co/transformers/main_classes/output.html) enables the return of dict-like python objects containing the model outputs, instead of the standard tuples. This object is self-documented, as keys can be used to retrieve values, while also behaving as a tuple, as users may retrieve objects by index or by slice.

This is a breaking change, as the limitation of that tuple is that it cannot be unpacked: `value0, value1 = outputs` will not work.

##### How to obtain the same behavior as v3.x in v4.x

In order to obtain the same behavior as version `v3.x`, you should set the `return_dict` argument to `False`, either in the model configuration or during the forward pass.

In version `v3.x`:
```py
model = BertModel.from_pretrained("bert-base-cased")
outputs = model(**inputs)
```
to obtain the same in version `v4.x`:
```py
model = BertModel.from_pretrained("bert-base-cased")
outputs = model(**inputs, return_dict=False)
```
or
```py
model = BertModel.from_pretrained("bert-base-cased", return_dict=False)
outputs = model(**inputs)
```
#### 5. Removed some deprecated attributes
|
||||
|
||||
Attributes that were deprecated have been removed if they had been deprecated for at least a month. The full list of deprecated attributes can be found in [#8604](https://github.com/huggingface/transformers/pull/8604).
|
||||
|
||||
Here is a list of these attributes/methods/arguments and what their replacements should be:
|
||||
|
||||
In several models, the labels become consistent with the other models:
|
||||
- `masked_lm_labels` becomes `labels` in `AlbertForMaskedLM` and `AlbertForPreTraining`.
|
||||
- `masked_lm_labels` becomes `labels` in `BertForMaskedLM` and `BertForPreTraining`.
|
||||
- `masked_lm_labels` becomes `labels` in `DistilBertForMaskedLM`.
|
||||
- `masked_lm_labels` becomes `labels` in `ElectraForMaskedLM`.
|
||||
- `masked_lm_labels` becomes `labels` in `LongformerForMaskedLM`.
|
||||
- `masked_lm_labels` becomes `labels` in `MobileBertForMaskedLM`.
|
||||
- `masked_lm_labels` becomes `labels` in `RobertaForMaskedLM`.
|
||||
- `lm_labels` becomes `labels` in `BartForConditionalGeneration`.
|
||||
- `lm_labels` becomes `labels` in `GPT2DoubleHeadsModel`.
|
||||
- `lm_labels` becomes `labels` in `OpenAIGPTDoubleHeadsModel`.
|
||||
- `lm_labels` becomes `labels` in `T5ForConditionalGeneration`.
|
||||
|
||||
In several models, the caching mechanism becomes consistent with the other models:
|
||||
- `decoder_cached_states` becomes `past_key_values` in all BART-like, FSMT and T5 models.
|
||||
- `decoder_past_key_values` becomes `past_key_values` in all BART-like, FSMT and T5 models.
|
||||
- `past` becomes `past_key_values` in all CTRL models.
|
||||
- `past` becomes `past_key_values` in all GPT-2 models.
|
||||
|
||||
Regarding the tokenizer classes:
|
||||
- The tokenizer attribute `max_len` becomes `model_max_length`.
|
||||
- The tokenizer attribute `return_lengths` becomes `return_length`.
|
||||
- The tokenizer encoding argument `is_pretokenized` becomes `is_split_into_words`.
|
||||
|
||||
Regarding the `Trainer` class:
|
||||
- The `Trainer` argument `tb_writer` is removed in favor of the callback `TensorBoardCallback(tb_writer=...)`.
|
||||
- The `Trainer` argument `prediction_loss_only` is removed in favor of the class argument `args.prediction_loss_only`.
|
||||
- The `Trainer` attribute `data_collator` should be a callable.
|
||||
- The `Trainer` method `_log` is deprecated in favor of `log`.
|
||||
- The `Trainer` method `_training_step` is deprecated in favor of `training_step`.
|
||||
- The `Trainer` method `_prediction_loop` is deprecated in favor of `prediction_loop`.
|
||||
- The `Trainer` method `is_local_master` is deprecated in favor of `is_local_process_zero`.
|
||||
- The `Trainer` method `is_world_master` is deprecated in favor of `is_world_process_zero`.
|
||||
|
||||
Regarding the `TFTrainer` class:
|
||||
- The `TFTrainer` argument `prediction_loss_only` is removed in favor of the class argument `args.prediction_loss_only`.
|
||||
- The `Trainer` method `_log` is deprecated in favor of `log`.
|
||||
- The `TFTrainer` method `_prediction_loop` is deprecated in favor of `prediction_loop`.
|
||||
- The `TFTrainer` method `_setup_wandb` is deprecated in favor of `setup_wandb`.
|
||||
- The `TFTrainer` method `_run_model` is deprecated in favor of `run_model`.
|
||||
|
||||
Regarding the `TrainingArguments` class:
|
||||
- The `TrainingArguments` argument `evaluate_during_training` is deprecated in favor of `evaluation_strategy`.
|
||||
|
||||
Regarding the Transfo-XL model:

- The Transfo-XL configuration attribute `tie_weight` becomes `tie_words_embeddings`.
- The Transfo-XL modeling method `reset_length` becomes `reset_memory_length`.

Regarding pipelines:

- The `FillMaskPipeline` argument `topk` becomes `top_k` (see the example below).
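For example (the model choice is illustrative):

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")

# Before: fill_mask("Paris is the [MASK] of France.", topk=3)
predictions = fill_mask("Paris is the [MASK] of France.", top_k=3)
print([p["token_str"] for p in predictions])
```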
## Migrating from pytorch-transformers to 🤗 Transformers

Here is a quick summary of what you should take care of when migrating from `pytorch-transformers` to 🤗 Transformers.
docs/source/model_doc/albert.rst:

@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 ALBERT
 -----------------------------------------------------------------------------------------------------------------------

@@ -31,7 +43,8 @@ Tips:
   similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same
   number of (repeating) layers.

-The original code can be found `here <https://github.com/google-research/ALBERT>`__.
+This model was contributed by `lysandre <https://huggingface.co/lysandre>`__. The original code can be found `here
+<https://github.com/google-research/ALBERT>`__.

 AlbertConfig
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -48,6 +61,13 @@ AlbertTokenizer
     create_token_type_ids_from_sequences, save_vocabulary


+AlbertTokenizerFast
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertTokenizerFast
+    :members:
+
+
 Albert specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
docs/source/model_doc/auto.rst:

@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 Auto Classes
 -----------------------------------------------------------------------------------------------------------------------

@@ -32,6 +44,13 @@ AutoTokenizer
     :members:


+AutoFeatureExtractor
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AutoFeatureExtractor
+    :members:
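As a brief editorial sketch of how the new auto class is used (the checkpoint name is only an assumed example from the model hub, not part of the diff):

.. code-block::

    >>> from transformers import AutoFeatureExtractor

    >>> # checkpoint name is an assumed example from the model hub
    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")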
 AutoModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -102,6 +121,20 @@ AutoModelForQuestionAnswering
     :members:


+AutoModelForTableQuestionAnswering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AutoModelForTableQuestionAnswering
+    :members:
+
+
+AutoModelForImageClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AutoModelForImageClassification
+    :members:
+
+
 TFAutoModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -163,3 +196,59 @@ TFAutoModelForQuestionAnswering

 .. autoclass:: transformers.TFAutoModelForQuestionAnswering
     :members:
+
+
+FlaxAutoModel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModel
+    :members:
+
+
+FlaxAutoModelForPreTraining
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForPreTraining
+    :members:
+
+
+FlaxAutoModelForMaskedLM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForMaskedLM
+    :members:
+
+
+FlaxAutoModelForSequenceClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForSequenceClassification
+    :members:
+
+
+FlaxAutoModelForQuestionAnswering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForQuestionAnswering
+    :members:
+
+
+FlaxAutoModelForTokenClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForTokenClassification
+    :members:
+
+
+FlaxAutoModelForMultipleChoice
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForMultipleChoice
+    :members:
+
+
+FlaxAutoModelForNextSentencePrediction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxAutoModelForNextSentencePrediction
+    :members:
docs/source/model_doc/bart.rst:

@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 BART
 -----------------------------------------------------------------------------------------------------------------------

@@ -23,14 +35,15 @@ According to the abstract,
 state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains
 of up to 6 ROUGE.

-The Authors' code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/bart>`__.
+This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__. The Authors' code can be found `here
+<https://github.com/pytorch/fairseq/tree/master/examples/bart>`__.


 Examples
 _______________________________________________________________________________________________________________________

 - Examples and scripts for fine-tuning BART and other models for sequence to sequence tasks can be found in
-  `examples/seq2seq/ <https://github.com/huggingface/transformers/blob/master/examples/seq2seq/README.md>`__.
+  :prefix_link:`examples/pytorch/summarization/ <examples/pytorch/summarization/README.md>`.
 - An example of how to train :class:`~transformers.BartForConditionalGeneration` with a Hugging Face :obj:`datasets`
   object can be found in this `forum discussion
   <https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904>`__.

@@ -43,9 +56,8 @@ Implementation Notes
 - Bart doesn't use :obj:`token_type_ids` for sequence classification. Use :class:`~transformers.BartTokenizer` or
   :meth:`~transformers.BartTokenizer.encode` to get the proper splitting.
-- The forward pass of :class:`~transformers.BartModel` will create decoder inputs (using the helper function
-  :func:`transformers.models.bart.modeling_bart._prepare_bart_decoder_inputs`) if they are not passed. This is
-  different than some other modeling APIs.
+- The forward pass of :class:`~transformers.BartModel` will create the ``decoder_input_ids`` if they are not passed.
+  This is different than some other modeling APIs. A typical use case of this feature is mask filling.
 - Model predictions are intended to be identical to the original implementation when
   :obj:`force_bos_token_to_be_generated=True`. This only works, however, if the string you pass to
   :func:`fairseq.encode` starts with a space.

@@ -53,7 +65,6 @@ Implementation Notes
   summarization, see the example in that docstrings.
 - Models that load the `facebook/bart-large-cnn` weights will not have a :obj:`mask_token_id`, or be able to perform
   mask-filling tasks.
-- For training/forward passes that don't involve beam search, pass :obj:`use_cache=False`.
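A minimal sketch of the ``decoder_input_ids`` behaviour described in the implementation notes above (``facebook/bart-large`` is used only as an example checkpoint):

.. code-block::

    >>> from transformers import BartModel, BartTokenizer

    >>> tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
    >>> model = BartModel.from_pretrained("facebook/bart-large")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> # No decoder_input_ids are passed: the model creates them from input_ids.
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape
    torch.Size([1, 8, 1024])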
 Mask Filling
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -86,6 +97,12 @@ BartTokenizer
     :members:


+BartTokenizerFast
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.BartTokenizerFast
+    :members:
+
+
 BartModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -93,8 +110,6 @@ BartModel
 .. autoclass:: transformers.BartModel
     :members: forward

-.. autofunction:: transformers.models.bart.modeling_bart._prepare_bart_decoder_inputs
-

 BartForConditionalGeneration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -116,6 +131,12 @@ BartForQuestionAnswering
 .. autoclass:: transformers.BartForQuestionAnswering
     :members: forward

+
+BartForCausalLM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.BartForCausalLM
+    :members: forward
+
+
 TFBartModel
docs/source/model_doc/barthez.rst (new file, 60 lines):

@@ -0,0 +1,60 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BARThez
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BARThez model was proposed in `BARThez: a Skilled Pretrained French Sequence-to-Sequence Model
<https://arxiv.org/abs/2010.12321>`__ by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct,
2020.

The abstract of the paper:


*Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing
(NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language
understanding tasks. While there are some notable exceptions, most of the available models and research have been
conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language
(to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research
that we adapted to suit BART's perturbation schemes. Unlike already existing BERT-based French language models such as
CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also
its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel
summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already
pretrained multilingual BART on BARThez's corpus, and we show that the resulting model, which we call mBARTHez,
provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.*

This model was contributed by `moussakam <https://huggingface.co/moussakam>`__. The Authors' code can be found `here
<https://github.com/moussaKam/BARThez>`__.


Examples
_______________________________________________________________________________________________________________________

- BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check:
  :prefix_link:`examples/pytorch/summarization/ <examples/pytorch/summarization/README.md>`. (A loading sketch follows
  below.)
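A minimal loading sketch; the ``moussaKam/barthez`` checkpoint name is an assumption based on the contributor's hub account, not part of the original file:

.. code-block::

    >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    >>> # checkpoint name assumed from the model hub
    >>> tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")
    >>> model = AutoModelForSeq2SeqLM.from_pretrained("moussaKam/barthez")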
BarthezTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BarthezTokenizer
    :members:


BarthezTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BarthezTokenizerFast
    :members:
docs/source/model_doc/bert.rst:

@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 BERT
 -----------------------------------------------------------------------------------------------------------------------

@@ -30,7 +42,8 @@ Tips:
 - BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is
   efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation.

-The original code can be found `here <https://github.com/google-research/bert>`__.
+This model was contributed by `thomwolf <https://huggingface.co/thomwolf>`__. The original code can be found `here
+<https://github.com/google-research/bert>`__.

 BertConfig
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -78,7 +91,7 @@ BertForPreTraining
     :members: forward


-BertModelLMHeadModel
+BertLMHeadModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. autoclass:: transformers.BertLMHeadModel

@@ -195,3 +208,52 @@ FlaxBertModel

 .. autoclass:: transformers.FlaxBertModel
     :members: __call__
+
+
+FlaxBertForPreTraining
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForPreTraining
+    :members: __call__
+
+
+FlaxBertForMaskedLM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForMaskedLM
+    :members: __call__
+
+
+FlaxBertForNextSentencePrediction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForNextSentencePrediction
+    :members: __call__
+
+
+FlaxBertForSequenceClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForSequenceClassification
+    :members: __call__
+
+
+FlaxBertForMultipleChoice
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForMultipleChoice
+    :members: __call__
+
+
+FlaxBertForTokenClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForTokenClassification
+    :members: __call__
+
+
+FlaxBertForQuestionAnswering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.FlaxBertForQuestionAnswering
+    :members: __call__
docs/source/model_doc/bert_japanese.rst (new file, 80 lines):

@@ -0,0 +1,80 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BertJapanese
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BERT models trained on Japanese text.

There are models with two different tokenization methods:

- Tokenize with MeCab and WordPiece. This requires some extra dependencies, `fugashi
  <https://github.com/polm/fugashi>`__ which is a wrapper around `MeCab <https://taku910.github.io/mecab/>`__.
- Tokenize into characters.

To use `MecabTokenizer`, you should ``pip install transformers["ja"]`` (or ``pip install -e .["ja"]`` if you install
from source) to install dependencies.

See `details on cl-tohoku repository <https://github.com/cl-tohoku/bert-japanese>`__.

Example of using a model with MeCab and WordPiece tokenization:

.. code-block::

    >>> import torch
    >>> from transformers import AutoModel, AutoTokenizer

    >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese")
    >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")

    >>> ## Input Japanese Text
    >>> line = "吾輩は猫である。"

    >>> inputs = tokenizer(line, return_tensors="pt")

    >>> print(tokenizer.decode(inputs['input_ids'][0]))
    [CLS] 吾輩 は 猫 で ある 。 [SEP]

    >>> outputs = bertjapanese(**inputs)

Example of using a model with Character tokenization:

.. code-block::

    >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char")
    >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char")

    >>> ## Input Japanese Text
    >>> line = "吾輩は猫である。"

    >>> inputs = tokenizer(line, return_tensors="pt")

    >>> print(tokenizer.decode(inputs['input_ids'][0]))
    [CLS] 吾 輩 は 猫 で あ る 。 [SEP]

    >>> outputs = bertjapanese(**inputs)

Tips:

- This implementation is the same as BERT, except for tokenization method. Refer to the :doc:`documentation of BERT
  <bert>` for more usage examples.

This model was contributed by `cl-tohoku <https://huggingface.co/cl-tohoku>`__.

BertJapaneseTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertJapaneseTokenizer
    :members:
docs/source/model_doc/bert_generation.rst:

@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 BertGeneration
 -----------------------------------------------------------------------------------------------------------------------

@@ -10,7 +22,7 @@ Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, Ali

 The abstract from the paper is the following:

-*Unsupervised pre-training of large neural models has recently revolutionized Natural Language Processing. By
+*Unsupervised pretraining of large neural models has recently revolutionized Natural Language Processing. By
 warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple
 benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language
 Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We

@@ -26,22 +38,22 @@ Usage:

 .. code-block::

-    # leverage checkpoints for Bert2Bert model...
-    # use BERT's cls token as BOS token and sep token as EOS token
-    encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
-    # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token
-    decoder = BertGenerationDecoder.from_pretrained("bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
-    bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)
+    >>> # leverage checkpoints for Bert2Bert model...
+    >>> # use BERT's cls token as BOS token and sep token as EOS token
+    >>> encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
+    >>> # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token
+    >>> decoder = BertGenerationDecoder.from_pretrained("bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
+    >>> bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)

-    # create tokenizer...
-    tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
+    >>> # create tokenizer...
+    >>> tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")

-    input_ids = tokenizer('This is a long article to summarize', add_special_tokens=False, return_tensors="pt").input_ids
-    labels = tokenizer('This is a short summary', return_tensors="pt").input_ids
+    >>> input_ids = tokenizer('This is a long article to summarize', add_special_tokens=False, return_tensors="pt").input_ids
+    >>> labels = tokenizer('This is a short summary', return_tensors="pt").input_ids

-    # train...
-    loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss
-    loss.backward()
+    >>> # train...
+    >>> loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss
+    >>> loss.backward()


 - Pretrained :class:`~transformers.EncoderDecoderModel` are also directly available in the model hub, e.g.,

@@ -49,15 +61,15 @@ Usage:

 .. code-block::

-    # instantiate sentence fusion model
-    sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse")
-    tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse")
+    >>> # instantiate sentence fusion model
+    >>> sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse")
+    >>> tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse")

-    input_ids = tokenizer('This is the first sentence. This is the second sentence.', add_special_tokens=False, return_tensors="pt").input_ids
+    >>> input_ids = tokenizer('This is the first sentence. This is the second sentence.', add_special_tokens=False, return_tensors="pt").input_ids

-    outputs = sentence_fuser.generate(input_ids)
+    >>> outputs = sentence_fuser.generate(input_ids)

-    print(tokenizer.decode(outputs[0]))
+    >>> print(tokenizer.decode(outputs[0]))


 Tips:

@@ -67,7 +79,8 @@ Tips:
 - For summarization, sentence splitting, sentence fusion and translation, no special tokens are required for the input.
   Therefore, no EOS token should be added to the end of the input.

-The original code can be found `here <https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder>`__.
+This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__. The original code can be
+found `here <https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder>`__.

 BertGenerationConfig
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
docs/source/model_doc/bertweet.rst (new file, 64 lines):

@@ -0,0 +1,64 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Bertweet
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BERTweet model was proposed in `BERTweet: A pre-trained language model for English Tweets
<https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf>`__ by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen.

The abstract from the paper is the following:

*We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having
the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et
al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al.,
2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks:
Part-of-speech tagging, Named-entity recognition and text classification.*

Example of use:

.. code-block::

    >>> import torch
    >>> from transformers import AutoModel, AutoTokenizer

    >>> bertweet = AutoModel.from_pretrained("vinai/bertweet-base")

    >>> # For transformers v4.x+:
    >>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)

    >>> # For transformers v3.x:
    >>> # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")

    >>> # INPUT TWEET IS ALREADY NORMALIZED!
    >>> line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"

    >>> input_ids = torch.tensor([tokenizer.encode(line)])

    >>> with torch.no_grad():
    ...     features = bertweet(input_ids)  # Models outputs are now tuples

    >>> # With TensorFlow 2.0+:
    >>> # from transformers import TFAutoModel
    >>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")

This model was contributed by `dqnguyen <https://huggingface.co/dqnguyen>`__. The original code can be found `here
<https://github.com/VinAIResearch/BERTweet>`__.

BertweetTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertweetTokenizer
    :members:
docs/source/model_doc/bigbird.rst (new file, 136 lines):

@@ -0,0 +1,136 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BigBird
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BigBird model was proposed in `Big Bird: Transformers for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by
Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon,
Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention
based transformer which extends Transformer based models, such as BERT, to much longer sequences. In addition to sparse
attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it
has been shown that applying sparse, global, and random attention approximates full attention, while being
computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context,
BigBird has shown improved performance on various long document NLP tasks, such as question answering and
summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP.
Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence
length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that
reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and
is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our
theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire
sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to
8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context,
BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also
propose novel applications to genomics data.*

Tips:

- For an in-detail explanation of how BigBird's attention works, see `this blog post
  <https://huggingface.co/blog/big-bird>`__.
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using
  **original_full** is advised as there is no benefit in using **block_sparse** attention (see the configuration
  sketch below).
- The code currently uses a window size of 3 blocks and 2 global blocks.
- Sequence length must be divisible by the block size.
- The current implementation supports only **ITC**.
- The current implementation doesn't support **num_random_blocks = 0**.

This model was contributed by `vasudevgupta <https://huggingface.co/vasudevgupta>`__. The original code can be found
`here <https://github.com/google-research/bigbird>`__.
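A configuration sketch of the two attention implementations mentioned in the tips; argument values are illustrative, see :class:`~transformers.BigBirdConfig` for the full list:

.. code-block::

    >>> from transformers import BigBirdConfig, BigBirdModel

    >>> # block_sparse attention: a window of 3 blocks plus 2 global blocks (ITC)
    >>> config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    >>> model = BigBirdModel(config)

    >>> # for sequence lengths < 1024, full attention is advised instead
    >>> config_full = BigBirdConfig(attention_type="original_full")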
BigBirdConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdConfig
    :members:


BigBirdTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary

BigBirdTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdTokenizerFast
    :members:

BigBird specific outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput
    :members:


BigBirdModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdModel
    :members: forward


BigBirdForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForPreTraining
    :members: forward


BigBirdForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForCausalLM
    :members: forward


BigBirdForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForMaskedLM
    :members: forward


BigBirdForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForSequenceClassification
    :members: forward


BigBirdForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForMultipleChoice
    :members: forward


BigBirdForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForTokenClassification
    :members: forward


BigBirdForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdForQuestionAnswering
    :members: forward
docs/source/model_doc/bigbird_pegasus.rst (new file, 98 lines):

@@ -0,0 +1,98 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BigBirdPegasus
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BigBird model was proposed in `Big Bird: Transformers for Longer Sequences <https://arxiv.org/abs/2007.14062>`__ by
Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon,
Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention
based transformer which extends Transformer based models, such as BERT, to much longer sequences. In addition to sparse
attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it
has been shown that applying sparse, global, and random attention approximates full attention, while being
computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context,
BigBird has shown improved performance on various long document NLP tasks, such as question answering and
summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP.
Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence
length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that
reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and
is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our
theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire
sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to
8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context,
BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also
propose novel applications to genomics data.*

Tips:

- For an in-detail explanation of how BigBird's attention works, see `this blog post
  <https://huggingface.co/blog/big-bird>`__.
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using
  **original_full** is advised as there is no benefit in using **block_sparse** attention.
- The code currently uses a window size of 3 blocks and 2 global blocks.
- Sequence length must be divisible by the block size.
- The current implementation supports only **ITC**.
- The current implementation doesn't support **num_random_blocks = 0**.
- BigBirdPegasus uses the `PegasusTokenizer
  <https://github.com/huggingface/transformers/blob/master/src/transformers/models/pegasus/tokenization_pegasus.py>`__
  (see the loading sketch below).

The original code can be found `here <https://github.com/google-research/bigbird>`__.
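A minimal loading sketch pairing the model with the ``PegasusTokenizer`` as the tips describe; the checkpoint name is an assumed example from the model hub:

.. code-block::

    >>> from transformers import BigBirdPegasusForConditionalGeneration, PegasusTokenizer

    >>> # checkpoint name assumed from the model hub
    >>> tokenizer = PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv")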
BigBirdPegasusConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusConfig
    :members:


BigBirdPegasusModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusModel
    :members: forward


BigBirdPegasusForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForConditionalGeneration
    :members: forward


BigBirdPegasusForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForSequenceClassification
    :members: forward


BigBirdPegasusForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForQuestionAnswering
    :members: forward


BigBirdPegasusForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BigBirdPegasusForCausalLM
    :members: forward
docs/source/model_doc/blenderbot.rst:

@@ -1,3 +1,15 @@
+..
+    Copyright 2020 The HuggingFace Team. All rights reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+    specific language governing permissions and limitations under the License.
+
 Blenderbot
 -----------------------------------------------------------------------------------------------------------------------

@@ -24,20 +36,18 @@ and code publicly available. Human evaluations show our best models are superior
 dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
 failure cases of our models.*

-The authors' code can be found `here <https://github.com/facebookresearch/ParlAI>`__ .
+This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__. The authors' code can be found `here
+<https://github.com/facebookresearch/ParlAI>`__ .


 Implementation Notes
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 - Blenderbot uses a standard `seq2seq model transformer <https://arxiv.org/pdf/1706.03762.pdf>`__ based architecture.
-- It inherits completely from :class:`~transformers.BartForConditionalGeneration`
-- Even though blenderbot is one model, it uses two tokenizers :class:`~transformers.BlenderbotSmallTokenizer` for 90M
-  checkpoint and :class:`~transformers.BlenderbotTokenizer` for all other checkpoints.
-- :class:`~transformers.BlenderbotSmallTokenizer` will always return :class:`~transformers.BlenderbotSmallTokenizer`,
-  regardless of checkpoint. To use the 3B parameter checkpoint, you must call
-  :class:`~transformers.BlenderbotTokenizer` directly.
 - Available checkpoints can be found in the `model hub <https://huggingface.co/models?search=blenderbot>`__.
+- This is the `default` Blenderbot model class. However, some smaller checkpoints, such as
+  ``facebook/blenderbot_small_90M``, have a different architecture and consequently should be used with
+  `BlenderbotSmall <https://huggingface.co/transformers/master/model_doc/blenderbot_small.html>`__.


 Usage
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -47,26 +57,15 @@ Here is an example of model usage:

 .. code-block::

-    >>> from transformers import BlenderbotSmallTokenizer, BlenderbotForConditionalGeneration
-    >>> mname = 'facebook/blenderbot-90M'
+    >>> from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
+    >>> mname = 'facebook/blenderbot-400M-distill'
     >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)
-    >>> tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
+    >>> tokenizer = BlenderbotTokenizer.from_pretrained(mname)
     >>> UTTERANCE = "My friends are cool but they eat too many carbs."
     >>> inputs = tokenizer([UTTERANCE], return_tensors='pt')
     >>> reply_ids = model.generate(**inputs)
-    >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])
-
-
-Here is how you can check out config values:
-
-.. code-block::
-
-    >>> from transformers import BlenderbotConfig
-    >>> config_90 = BlenderbotConfig.from_pretrained("facebook/blenderbot-90M")
-    >>> config_90.to_diff_dict()  # show interesting Values.
-    >>> configuration_3B = BlenderbotConfig("facebook/blenderbot-3B")
-    >>> configuration_3B.to_diff_dict()
+    >>> print(tokenizer.batch_decode(reply_ids))
+    ["<s> That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?</s>"]


 BlenderbotConfig
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -81,11 +80,14 @@ BlenderbotTokenizer
 .. autoclass:: transformers.BlenderbotTokenizer
     :members: build_inputs_with_special_tokens

-BlenderbotSmallTokenizer
+BlenderbotModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-.. autoclass:: transformers.BlenderbotSmallTokenizer
-    :members:
+See :obj:`transformers.BartModel` for arguments to `forward` and `generate`
+
+.. autoclass:: transformers.BlenderbotModel
+    :members: forward


 BlenderbotForConditionalGeneration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -94,13 +96,25 @@ BlenderbotForConditionalGeneration
 See :obj:`transformers.BartForConditionalGeneration` for arguments to `forward` and `generate`

 .. autoclass:: transformers.BlenderbotForConditionalGeneration
-    :members:
+    :members: forward


+BlenderbotForCausalLM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.BlenderbotForCausalLM
+    :members: forward
+
+
+TFBlenderbotModel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFBlenderbotModel
+    :members: call
+
+
 TFBlenderbotForConditionalGeneration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 See :obj:`transformers.TFBartForConditionalGeneration` for arguments to `forward` and `generate`

 .. autoclass:: transformers.TFBlenderbotForConditionalGeneration
-    :members:
+    :members: call
docs/source/model_doc/blenderbot_small.rst (new file, 92 lines):

@@ -0,0 +1,92 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Blenderbot Small
-----------------------------------------------------------------------------------------------------------------------

Note that :class:`~transformers.BlenderbotSmallModel` and
:class:`~transformers.BlenderbotSmallForConditionalGeneration` are only used in combination with the checkpoint
`facebook/blenderbot-90M <https://huggingface.co/facebook/blenderbot-90M>`__ (see the usage sketch below). Larger
Blenderbot checkpoints should instead be used with :class:`~transformers.BlenderbotModel` and
:class:`~transformers.BlenderbotForConditionalGeneration`.

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The Blender chatbot model was proposed in `Recipes for building an open-domain chatbot
<https://arxiv.org/pdf/2004.13637.pdf>`__ by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan
Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.

The abstract of the paper is the following:

*Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that
scaling neural models in the number of parameters and the size of the data they are trained on gives improved results,
we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of
skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to
their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent
persona. We show that large scale models can learn these skills when given appropriate training data and choice of
generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models
and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn
dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
failure cases of our models.*

This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__. The authors' code can be
found `here <https://github.com/facebookresearch/ParlAI>`__ .
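A short usage sketch with the ``facebook/blenderbot-90M`` checkpoint mentioned above, mirroring the Blenderbot usage example:

.. code-block::

    >>> from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

    >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot-90M")
    >>> tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    >>> inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
    >>> reply_ids = model.generate(**inputs)
    >>> print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))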
BlenderbotSmallConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BlenderbotSmallConfig
    :members:


BlenderbotSmallTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BlenderbotSmallTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary


BlenderbotSmallModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BlenderbotSmallModel
    :members: forward


BlenderbotSmallForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BlenderbotSmallForConditionalGeneration
    :members: forward


BlenderbotSmallForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BlenderbotSmallForCausalLM
    :members: forward


TFBlenderbotSmallModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBlenderbotSmallModel
    :members: call


TFBlenderbotSmallForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBlenderbotSmallForConditionalGeneration
    :members: call
47
docs/source/model_doc/bort.rst
Normal file
47
docs/source/model_doc/bort.rst
Normal file
@ -0,0 +1,47 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

BORT
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The BORT model was proposed in `Optimal Subarchitecture Extraction for BERT <https://arxiv.org/abs/2010.10499>`__ by
Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for BERT, which the authors
refer to as "Bort".

The abstract from the paper is the following:

*We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by
applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as
"Bort", is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the
original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which
is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large
(Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same
hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the
architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%,
absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.*

Tips:

- BORT's model architecture is based on BERT, so one can refer to :doc:`BERT's documentation page <bert>` for the
  model's API as well as usage examples.
- BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, so one can refer to :doc:`RoBERTa's documentation page
  <roberta>` for the tokenizer's API as well as usage examples, and to the loading sketch below.
- BORT requires a specific fine-tuning algorithm, called `Agora
  <https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology>`__,
  which is sadly not open-sourced yet. It would be very useful for the community if someone implemented the algorithm
  so that BORT fine-tuning works.

This model was contributed by `stefan-it <https://huggingface.co/stefan-it>`__. The original code can be found `here
<https://github.com/alexa/bort/>`__.
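
Following the two tips above, a minimal loading sketch pairs the BERT model class with the RoBERTa tokenizer; the
checkpoint name ``amazon/bort`` is an assumption.

.. code-block::

    >>> from transformers import BertModel, RobertaTokenizer

    >>> # BORT reuses the BERT architecture but the RoBERTa tokenizer
    >>> tokenizer = RobertaTokenizer.from_pretrained("amazon/bort")  # assumed checkpoint name
    >>> model = BertModel.from_pretrained("amazon/bort")

    >>> inputs = tokenizer("Bort is a compressed variant of BERT.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)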
@ -1,3 +1,15 @@

..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

CamemBERT
-----------------------------------------------------------------------------------------------------------------------

@ -25,7 +37,8 @@ Tips:
- This implementation is the same as RoBERTa. Refer to the :doc:`documentation of RoBERTa <roberta>` for usage examples
  as well as the information relative to the inputs and outputs.

The original code can be found `here <https://camembert-model.fr/>`__.
This model was contributed by `camembert <https://huggingface.co/camembert>`__. The original code can be found `here
<https://camembert-model.fr/>`__.


CamembertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@ -42,6 +55,13 @@ CamembertTokenizer
        create_token_type_ids_from_sequences, save_vocabulary


CamembertTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CamembertTokenizerFast
    :members:


CamembertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

154
docs/source/model_doc/clip.rst
Normal file
@ -0,0 +1,154 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

CLIP
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The CLIP model was proposed in `Learning Transferable Visual Models From Natural Language Supervision
<https://arxiv.org/abs/2103.00020>`__ by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh,
Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP
(Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be
instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing
for the task, similarly to the zero-shot capabilities of GPT-2 and 3.

The abstract from the paper is the following:

*State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This
restricted form of supervision limits their generality and usability since additional labeled data is needed to specify
any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a
much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes
with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400
million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference
learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study
the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks
such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need
for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot
without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained
model weights at this https URL.*

Usage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image
classification. CLIP uses a ViT-like Transformer to get visual features and a causal language model to get the text
features. Both the text and visual features are then projected to a latent space with identical dimension. The dot
product between the projected image and text features is then used as a similarity score.

To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches,
which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors
also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.
The :class:`~transformers.CLIPFeatureExtractor` can be used to resize (or rescale) and normalize images for the model.

The :class:`~transformers.CLIPTokenizer` is used to encode the text. The :class:`~transformers.CLIPProcessor` wraps
:class:`~transformers.CLIPFeatureExtractor` and :class:`~transformers.CLIPTokenizer` into a single instance to both
encode the text and prepare the images. The following example shows how to get the image-text similarity scores using
:class:`~transformers.CLIPProcessor` and :class:`~transformers.CLIPModel`.


.. code-block::

    >>> import torch
    >>> from PIL import Image
    >>> import requests

    >>> from transformers import CLIPProcessor, CLIPModel

    >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

    >>> outputs = model(**inputs)
    >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
    >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
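
Continuing the example above, one can also compute the two embedding spaces separately with
:meth:`~transformers.CLIPModel.get_image_features` and :meth:`~transformers.CLIPModel.get_text_features` and score
them by cosine similarity; the following is a sketch rather than an official recipe.

.. code-block::

    >>> # reuse `model` and `inputs` from the previous snippet
    >>> image_embeds = model.get_image_features(pixel_values=inputs["pixel_values"])
    >>> text_embeds = model.get_text_features(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])

    >>> # normalize, so that the dot product is the cosine similarity
    >>> image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
    >>> text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
    >>> similarity = image_embeds @ text_embeds.T  # one row per image, one column per text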

This model was contributed by `valhalla <https://huggingface.co/valhalla>`__. The original code can be found `here
<https://github.com/openai/CLIP>`__.

CLIPConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPConfig
    :members: from_text_vision_configs


CLIPTextConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTextConfig
    :members:


CLIPVisionConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPVisionConfig
    :members:


CLIPTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary


CLIPTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTokenizerFast
    :members:


CLIPFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPFeatureExtractor
    :members:


CLIPProcessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPProcessor
    :members:


CLIPModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPModel
    :members: forward, get_text_features, get_image_features


CLIPTextModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPTextModel
    :members: forward


CLIPVisionModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CLIPVisionModel
    :members: forward
145
docs/source/model_doc/convbert.rst
Normal file
@ -0,0 +1,145 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

ConvBERT
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The ConvBERT model was proposed in `ConvBERT: Improving BERT with Span-based Dynamic Convolution
<https://arxiv.org/abs/2008.02496>`__ by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng
Yan.

The abstract from the paper is the following:

*Pre-trained language models like BERT and its variants have recently achieved impressive performance in various
natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers
large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for
generating the attention map from a global perspective, we observe some heads only need to learn local dependencies,
which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to
replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the
rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context
learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that
ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and
fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while
using less than 1/4 training cost. Code and pre-trained models will be released.*

ConvBERT training tips are similar to those of BERT.

This model was contributed by `abhishek <https://huggingface.co/abhishek>`__. The original implementation can be found
here: https://github.com/yitu-opensource/ConvBert
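
As a quick sanity-check sketch (the checkpoint name ``YituTech/conv-bert-base`` is an assumption):

.. code-block::

    >>> from transformers import ConvBertTokenizer, ConvBertModel

    >>> tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")  # assumed checkpoint
    >>> model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")

    >>> inputs = tokenizer("ConvBERT mixes convolution and self-attention heads.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)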

ConvBertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertConfig
    :members:


ConvBertTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary


ConvBertTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertTokenizerFast
    :members:


ConvBertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertModel
    :members: forward


ConvBertForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertForMaskedLM
    :members: forward


ConvBertForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertForSequenceClassification
    :members: forward


ConvBertForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertForMultipleChoice
    :members: forward


ConvBertForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertForTokenClassification
    :members: forward


ConvBertForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.ConvBertForQuestionAnswering
    :members: forward


TFConvBertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFConvBertModel
    :members: call


TFConvBertForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFConvBertForMaskedLM
    :members: call


TFConvBertForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFConvBertForSequenceClassification
    :members: call


TFConvBertForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFConvBertForMultipleChoice
    :members: call


TFConvBertForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFConvBertForTokenClassification
    :members: call


TFConvBertForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFConvBertForQuestionAnswering
    :members: call
45
docs/source/model_doc/cpm.rst
Normal file
@ -0,0 +1,45 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

CPM
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The CPM model was proposed in `CPM: A Large-scale Generative Chinese Pre-trained Language Model
<https://arxiv.org/abs/2012.00413>`__ by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin,
Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen,
Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.

The abstract from the paper is the following:

*Pre-trained Language Models (PLMs) have proven to be beneficial for various downstream NLP tasks. Recently, GPT-3,
with 175 billion parameters and 570GB training data, drew a lot of attention due to the capacity of few-shot (even
zero-shot) learning. However, applying GPT-3 to address Chinese NLP tasks is still challenging, as the training corpus
of GPT-3 is primarily English, and the parameters are not publicly available. In this technical report, we release the
Chinese Pre-trained Language Model (CPM) with generative pre-training on large-scale Chinese training data. To the best
of our knowledge, CPM, with 2.6 billion parameters and 100GB Chinese training data, is the largest Chinese pre-trained
language model, which could facilitate several downstream Chinese NLP tasks, such as conversation, essay generation,
cloze test, and language understanding. Extensive experiments demonstrate that CPM achieves strong performance on many
NLP tasks in the settings of few-shot (even zero-shot) learning.*

This model was contributed by `canwenxu <https://huggingface.co/canwenxu>`__. The original implementation can be found
here: https://github.com/TsinghuaAI/CPM-Generate

Note: We only have a tokenizer here, since the model architecture is the same as GPT-2.
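
In practice that means pairing :class:`~transformers.CpmTokenizer` with the GPT-2 model classes; the following sketch
assumes the weights are published as ``TsinghuaAI/CPM-Generate`` and that ``jieba`` is installed for the tokenizer.

.. code-block::

    >>> from transformers import CpmTokenizer, GPT2LMHeadModel

    >>> tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")  # assumed checkpoint
    >>> model = GPT2LMHeadModel.from_pretrained("TsinghuaAI/CPM-Generate")

    >>> input_ids = tokenizer("清华大学", return_tensors="pt").input_ids
    >>> output_ids = model.generate(input_ids, max_length=30)
    >>> print(tokenizer.decode(output_ids[0]))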

CpmTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CpmTokenizer
    :members:
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

CTRL
-----------------------------------------------------------------------------------------------------------------------

@ -34,7 +46,8 @@ Tips:
  `reusing the past in generative models <../quickstart.html#using-the-past>`__ for more information on the usage of
  this argument.

The original code can be found `here <https://github.com/salesforce/ctrl>`__.
This model was contributed by `keskarnitishr <https://huggingface.co/keskarnitishr>`__. The original code can be found
`here <https://github.com/salesforce/ctrl>`__.


CTRLConfig
@ -65,6 +78,13 @@ CTRLLMHeadModel
    :members: forward


CTRLForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.CTRLForSequenceClassification
    :members: forward


TFCTRLModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@ -78,3 +98,8 @@ TFCTRLLMHeadModel
.. autoclass:: transformers.TFCTRLLMHeadModel
    :members: call

TFCTRLForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFCTRLForSequenceClassification
    :members: call
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

DeBERTa
-----------------------------------------------------------------------------------------------------------------------

@ -20,13 +32,14 @@ disentangled attention mechanism, where each word is represented using two vecto
position, respectively, and the attention weights among words are computed using disentangled matrices on their
contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to
predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency
of model pre-training and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half
of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9%
of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of
the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9%
(90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and
pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.*

The original code can be found `here <https://github.com/microsoft/DeBERTa>`__.
This model was contributed by `DeBERTa <https://huggingface.co/DeBERTa>`__. The original code can be found `here
<https://github.com/microsoft/DeBERTa>`__.
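
As a minimal encoding sketch using the base checkpoint ``microsoft/deberta-base``:

.. code-block::

    >>> from transformers import DebertaTokenizer, DebertaModel

    >>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
    >>> model = DebertaModel.from_pretrained("microsoft/deberta-base")

    >>> inputs = tokenizer("DeBERTa improves BERT with disentangled attention.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)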

DebertaConfig
@ -43,12 +56,18 @@ DebertaTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary

DebertaTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaTokenizerFast
    :members: build_inputs_with_special_tokens, create_token_type_ids_from_sequences


DebertaModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaModel
    :members:
    :members: forward


DebertaPreTrainedModel
@ -58,8 +77,29 @@ DebertaPreTrainedModel
    :members:


DebertaForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaForMaskedLM
    :members: forward


DebertaForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaForSequenceClassification
    :members:
    :members: forward


DebertaForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaForTokenClassification
    :members: forward


DebertaForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaForQuestionAnswering
    :members: forward
119
docs/source/model_doc/deberta_v2.rst
Normal file
@ -0,0 +1,119 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

DeBERTa-v2
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention
<https://arxiv.org/abs/2006.03654>`__ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is based on Google's
BERT model released in 2018 and Facebook's RoBERTa model released in 2019.

It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in
RoBERTa.

The abstract from the paper is the following:

*Recent progress in pre-trained neural language models has significantly improved the performance of many natural
language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with
disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the
disentangled attention mechanism, where each word is represented using two vectors that encode its content and
position, respectively, and the attention weights among words are computed using disentangled matrices on their
contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to
predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency
of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of
the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9%
(90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and
pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.*


The following information is visible directly on the `original implementation repository
<https://github.com/microsoft/DeBERTa>`__. DeBERTa v2 is the second version of the DeBERTa model. It includes the 1.5B
model used for the SuperGLUE single-model submission, achieving 89.9 versus the human baseline of 89.8. You can find
more details about this submission in the authors' `blog
<https://www.microsoft.com/en-us/research/blog/microsoft-deberta-surpasses-human-performance-on-the-superglue-benchmark/>`__.

New in v2:

- **Vocabulary** In v2 the tokenizer is changed to use a new vocabulary of size 128K built from the training data.
  Instead of a GPT2-based tokenizer, the tokenizer is now a `sentencepiece-based
  <https://github.com/google/sentencepiece>`__ tokenizer.
- **nGiE (nGram Induced Input Encoding)** The DeBERTa-v2 model uses an additional convolution layer alongside the first
  transformer layer to better learn the local dependency of input tokens.
- **Sharing position projection matrix with content projection matrix in attention layer** Based on previous
  experiments, this can save parameters without affecting the performance.
- **Apply bucket to encode relative positions** The DeBERTa-v2 model uses a log bucket to encode relative positions,
  similar to T5.
- **900M model & 1.5B model** Two additional model sizes are available: 900M and 1.5B, which significantly improve the
  performance of downstream tasks.

This model was contributed by `DeBERTa <https://huggingface.co/DeBERTa>`__. The original code can be found `here
<https://github.com/microsoft/DeBERTa>`__.
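
A minimal loading sketch for one of the v2 checkpoints (the name ``microsoft/deberta-v2-xlarge`` is an assumption; the
tokenizer needs ``sentencepiece`` installed):

.. code-block::

    >>> from transformers import DebertaV2Tokenizer, DebertaV2Model

    >>> tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")  # assumed checkpoint
    >>> model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

    >>> inputs = tokenizer("DeBERTa-v2 uses a 128K sentencepiece vocabulary.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)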


DebertaV2Config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2Config
    :members:


DebertaV2Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2Tokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary


DebertaV2Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2Model
    :members: forward


DebertaV2PreTrainedModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2PreTrainedModel
    :members: forward


DebertaV2ForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2ForMaskedLM
    :members: forward


DebertaV2ForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2ForSequenceClassification
    :members: forward


DebertaV2ForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2ForTokenClassification
    :members: forward


DebertaV2ForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DebertaV2ForQuestionAnswering
    :members: forward
111
docs/source/model_doc/deit.rst
Normal file
@ -0,0 +1,111 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

DeiT
-----------------------------------------------------------------------------------------------------------------------

.. note::

    This is a recently introduced model so the API hasn't been tested extensively. There may be some bugs or slight
    breaking changes in the future as remaining issues are fixed. If you see something strange, file a `Github Issue
    <https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__.


Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The DeiT model was proposed in `Training data-efficient image transformers & distillation through attention
<https://arxiv.org/abs/2012.12877>`__ by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre
Sablayrolles, Hervé Jégou. The `Vision Transformer (ViT) <https://huggingface.co/transformers/model_doc/vit.html>`__
introduced in `Dosovitskiy et al., 2020 <https://arxiv.org/abs/2010.11929>`__ has shown that one can match or even
outperform existing convolutional neural networks using a Transformer encoder (BERT-like). However, the ViT models
introduced in that paper required training on expensive infrastructure for multiple weeks, using external data. DeiT
models (data-efficient image transformers) are more efficiently trained transformers for image classification,
requiring far less data and far less computing resources compared to the original ViT models.

The abstract from the paper is the following:

*Recently, neural networks purely based on attention were shown to address image understanding tasks such as image
classification. However, these visual transformers are pre-trained with hundreds of millions of images using an
expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free
transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision
transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external
data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation
token ensuring that the student learns from the teacher through attention. We show the interest of this token-based
distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets
for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and
models.*

Tips:

- Compared to ViT, DeiT models use a so-called distillation token to effectively learn from a teacher (which, in the
  DeiT paper, is a ResNet-like model). The distillation token is learned through backpropagation, by interacting with
  the class ([CLS]) and patch tokens through the self-attention layers.
- There are 2 ways to fine-tune distilled models, either (1) in a classic way, by only placing a prediction head on top
  of the final hidden state of the class token and not using the distillation signal, or (2) by placing both a
  prediction head on top of the class token and on top of the distillation token. In that case, the [CLS] prediction
  head is trained using regular cross-entropy between the prediction of the head and the ground-truth label, while the
  distillation prediction head is trained using hard distillation (cross-entropy between the prediction of the
  distillation head and the label predicted by the teacher). At inference time, one takes the average prediction
  between both heads as final prediction. (2) is also called "fine-tuning with distillation", because one relies on a
  teacher that has already been fine-tuned on the downstream dataset. In terms of models, (1) corresponds to
  :class:`~transformers.DeiTForImageClassification` and (2) corresponds to
  :class:`~transformers.DeiTForImageClassificationWithTeacher`; see the sketch after this list.
- Note that the authors also did try soft distillation for (2) (in which case the distillation prediction head is
  trained using KL divergence to match the softmax output of the teacher), but hard distillation gave the best results.
- All released checkpoints were pre-trained and fine-tuned on ImageNet-1k only. No external data was used. This is in
  contrast with the original ViT model, which used external data like the JFT-300M dataset/Imagenet-21k for
  pre-training.
- The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into
  :class:`~transformers.ViTModel` or :class:`~transformers.ViTForImageClassification`. Techniques like data
  augmentation, optimization, and regularization were used in order to simulate training on a much larger dataset
  (while only using ImageNet-1k for pre-training). There are 4 variants available (in 3 different sizes):
  `facebook/deit-tiny-patch16-224`, `facebook/deit-small-patch16-224`, `facebook/deit-base-patch16-224` and
  `facebook/deit-base-patch16-384`. Note that one should use :class:`~transformers.DeiTFeatureExtractor` in order to
  prepare images for the model.

This model was contributed by `nielsr <https://huggingface.co/nielsr>`__.
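
A classification sketch with the teacher-distilled base model (the checkpoint name
``facebook/deit-base-distilled-patch16-224`` is an assumption):

.. code-block::

    >>> from PIL import Image
    >>> import requests

    >>> from transformers import DeiTFeatureExtractor, DeiTForImageClassificationWithTeacher

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> feature_extractor = DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    >>> model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

    >>> inputs = feature_extractor(images=image, return_tensors="pt")
    >>> logits = model(**inputs).logits  # averages the [CLS] and distillation head predictions
    >>> print(model.config.id2label[logits.argmax(-1).item()])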


DeiTConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DeiTConfig
    :members:


DeiTFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DeiTFeatureExtractor
    :members: __call__


DeiTModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DeiTModel
    :members: forward


DeiTForImageClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DeiTForImageClassification
    :members: forward


DeiTForImageClassificationWithTeacher
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DeiTForImageClassificationWithTeacher
    :members: forward
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

DialoGPT
-----------------------------------------------------------------------------------------------------------------------

@ -36,7 +48,6 @@ modeling. We first concatenate all dialog turns within a dialogue session into a
sequence length), ended by the end-of-text token.* For more information please refer to the original paper.


DialoGPT's architecture is based on the GPT2 model, so one can refer to GPT2's `docstring
<https://huggingface.co/transformers/model_doc/gpt2.html>`_.
DialoGPT's architecture is based on the GPT2 model, so one can refer to :doc:`GPT2's documentation page <gpt2>`.

The original code can be found `here <https://github.com/microsoft/DialoGPT>`_.
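
Since the architecture is plain GPT2, a single-turn chat only needs the causal LM classes; a sketch (the checkpoint
name ``microsoft/DialoGPT-small`` is an assumption):

.. code-block::

    >>> from transformers import AutoModelForCausalLM, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")  # assumed checkpoint
    >>> model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

    >>> # a user turn is terminated by the end-of-text token, as in training
    >>> input_ids = tokenizer.encode("Does money buy happiness?" + tokenizer.eos_token, return_tensors="pt")
    >>> reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
    >>> print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))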
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

DistilBERT
-----------------------------------------------------------------------------------------------------------------------

@ -18,9 +30,9 @@ operating these large models in on-the-edge and/or under constrained computation
remains challenging. In this work, we propose a method to pre-train a smaller general-purpose language representation
model, called DistilBERT, which can then be fine-tuned with good performances on a wide range of tasks like its larger
counterparts. While most prior work investigated the use of distillation for building task-specific models, we leverage
knowledge distillation during the pre-training phase and show that it is possible to reduce the size of a BERT model by
knowledge distillation during the pretraining phase and show that it is possible to reduce the size of a BERT model by
40%, while retaining 97% of its language understanding capabilities and being 60% faster. To leverage the inductive
biases learned by larger models during pre-training, we introduce a triple loss combining language modeling,
biases learned by larger models during pretraining, we introduce a triple loss combining language modeling,
distillation and cosine-distance losses. Our smaller, faster and lighter model is cheaper to pre-train and we
demonstrate its capabilities for on-device computations in a proof-of-concept experiment and a comparative on-device
study.*
@ -32,8 +44,8 @@ Tips:
- DistilBERT doesn't have options to select the input positions (:obj:`position_ids` input). This could be added if
  necessary though, just let us know if you need this option.

The original code can be found `here
<https://github.com/huggingface/transformers/tree/master/examples/distillation>`__.
This model was contributed by `victorsanh <https://huggingface.co/victorsanh>`__. The original code can be found
:prefix_link:`here <examples/research-projects/distillation>`.
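
A minimal encoding sketch with the uncased base checkpoint ``distilbert-base-uncased`` (note that DistilBERT also has
no ``token_type_ids`` input):

.. code-block::

    >>> from transformers import DistilBertTokenizer, DistilBertModel

    >>> tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    >>> model = DistilBertModel.from_pretrained("distilbert-base-uncased")

    >>> inputs = tokenizer("DistilBERT is a distilled version of BERT.", return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)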


DistilBertConfig
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

DPR
-----------------------------------------------------------------------------------------------------------------------

@ -5,7 +17,7 @@ Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Dense Passage Retrieval (DPR) is a set of tools and models for state-of-the-art open-domain Q&A research. It was
intorduced in `Dense Passage Retrieval for Open-Domain Question Answering <https://arxiv.org/abs/2004.04906>`__ by
introduced in `Dense Passage Retrieval for Open-Domain Question Answering <https://arxiv.org/abs/2004.04906>`__ by
Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, Wen-tau Yih.

The abstract from the paper is the following:
@ -18,7 +30,8 @@ our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% ab
retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA
benchmarks.*

The original code can be found `here <https://github.com/facebookresearch/DPR>`__.
This model was contributed by `lhoestq <https://huggingface.co/lhoestq>`__. The original code can be found `here
<https://github.com/facebookresearch/DPR>`__.
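
A sketch of embedding a question with the NQ question encoder (the checkpoint name
``facebook/dpr-question_encoder-single-nq-base`` is an assumption):

.. code-block::

    >>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer

    >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    >>> model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

    >>> inputs = tokenizer("What is dense passage retrieval?", return_tensors="pt")
    >>> question_embedding = model(**inputs).pooler_output  # a dense vector to match against passage embeddings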


DPRConfig
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

ELECTRA
-----------------------------------------------------------------------------------------------------------------------

@ -12,14 +24,14 @@ identify which tokens were replaced by the generator in the sequence.

The abstract from the paper is the following:

*Masked language modeling (MLM) pre-training methods such as BERT corrupt the input by replacing some tokens with
[MASK] and then train a model to reconstruct the original tokens. While they produce good results when transferred to
*Masked language modeling (MLM) pretraining methods such as BERT corrupt the input by replacing some tokens with [MASK]
and then train a model to reconstruct the original tokens. While they produce good results when transferred to
downstream NLP tasks, they generally require large amounts of compute to be effective. As an alternative, we propose a
more sample-efficient pre-training task called replaced token detection. Instead of masking the input, our approach
more sample-efficient pretraining task called replaced token detection. Instead of masking the input, our approach
corrupts it by replacing some tokens with plausible alternatives sampled from a small generator network. Then, instead
of training a model that predicts the original identities of the corrupted tokens, we train a discriminative model that
predicts whether each token in the corrupted input was replaced by a generator sample or not. Thorough experiments
demonstrate this new pre-training task is more efficient than MLM because the task is defined over all input tokens
demonstrate this new pretraining task is more efficient than MLM because the task is defined over all input tokens
rather than just the small subset that was masked out. As a result, the contextual representations learned by our
approach substantially outperform the ones learned by BERT given the same model size, data, and compute. The gains are
particularly strong for small models; for example, we train a model on one GPU for 4 days that outperforms GPT (trained
@ -42,7 +54,8 @@ Tips:
  :class:`~transformers.ElectraForPreTraining` model (the classification head will be randomly initialized as it
  doesn't exist in the generator).

The original code can be found `here <https://github.com/google-research/electra>`__.
This model was contributed by `lysandre <https://huggingface.co/lysandre>`__. The original code can be found `here
<https://github.com/google-research/electra>`__.
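
A sketch of running the discriminator to flag replaced tokens (the checkpoint name
``google/electra-small-discriminator`` is an assumption):

.. code-block::

    >>> import torch
    >>> from transformers import ElectraTokenizer, ElectraForPreTraining

    >>> tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")  # assumed checkpoint
    >>> model = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

    >>> inputs = tokenizer("The quick brown fox fake over the lazy dog", return_tensors="pt")
    >>> logits = model(**inputs).logits
    >>> predictions = (torch.sigmoid(logits) > 0.5).long()  # 1 flags a token as replaced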


ElectraConfig
@ -172,3 +185,52 @@ TFElectraForQuestionAnswering

.. autoclass:: transformers.TFElectraForQuestionAnswering
    :members: call


FlaxElectraModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraModel
    :members: __call__


FlaxElectraForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraForPreTraining
    :members: __call__


FlaxElectraForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraForMaskedLM
    :members: __call__


FlaxElectraForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraForSequenceClassification
    :members: __call__


FlaxElectraForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraForMultipleChoice
    :members: __call__


FlaxElectraForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraForTokenClassification
    :members: __call__


FlaxElectraForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.FlaxElectraForQuestionAnswering
    :members: __call__
@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Encoder Decoder Models
-----------------------------------------------------------------------------------------------------------------------

@ -1,3 +1,15 @@
|
||||
..
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
FlauBERT
|
||||
-----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
@ -19,11 +31,12 @@ representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018;
|
||||
heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for
|
||||
Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text
|
||||
classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most of the
|
||||
time they outperform other pre-training approaches. Different versions of FlauBERT as well as a unified evaluation
|
||||
time they outperform other pretraining approaches. Different versions of FlauBERT as well as a unified evaluation
|
||||
protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared to the research
|
||||
community for further reproducible experiments in French NLP.*
|
||||
|
||||
The original code can be found `here <https://github.com/getalp/Flaubert>`__.
|
||||
This model was contributed by `formiel <https://huggingface.co/formiel>`__. The original code can be found `here
|
||||
<https://github.com/getalp/Flaubert>`__.
|
||||
|
||||
|
||||
FlaubertConfig
|
||||
|
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

FSMT
-----------------------------------------------------------------------------------------------------------------------

@@ -22,7 +34,8 @@ data, then decode using noisy channel model reranking. Our submissions are ranke
human evaluation campaign. On En->De, our system significantly outperforms other systems as well as human translations.
This system improves upon our WMT'18 submission by 4.5 BLEU points.*

The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/examples/wmt19>`__.
This model was contributed by `stas <https://huggingface.co/stas>`__. The original code can be found `here
<https://github.com/pytorch/fairseq/tree/master/examples/wmt19>`__.

Implementation Notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -44,7 +57,7 @@ FSMTTokenizer

.. autoclass:: transformers.FSMTTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, prepare_seq2seq_batch, save_vocabulary
        create_token_type_ids_from_sequences, save_vocabulary


FSMTModel

@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Funnel Transformer
-----------------------------------------------------------------------------------------------------------------------

@@ -37,7 +49,8 @@ Tips:
:class:`~transformers.FunnelBaseModel`, :class:`~transformers.FunnelForSequenceClassification` and
:class:`~transformers.FunnelForMultipleChoice`.

The original code can be found `here <https://github.com/laiguokun/Funnel-Transformer>`__.
This model was contributed by `sgugger <https://huggingface.co/sgugger>`__. The original code can be found `here
<https://github.com/laiguokun/Funnel-Transformer>`__.


FunnelConfig

@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

OpenAI GPT
-----------------------------------------------------------------------------------------------------------------------

@@ -14,7 +26,7 @@ The abstract from the paper is the following:
*Natural language understanding comprises a wide range of diverse tasks such as textual entailment, question answering,
semantic similarity assessment, and document classification. Although large unlabeled text corpora are abundant,
labeled data for learning these specific tasks is scarce, making it challenging for discriminatively trained models to
perform adequately. We demonstrate that large gains on these tasks can be realized by generative pre-training of a
perform adequately. We demonstrate that large gains on these tasks can be realized by generative pretraining of a
language model on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each specific task. In
contrast to previous approaches, we make use of task-aware input transformations during fine-tuning to achieve
effective transfer while requiring minimal changes to the model architecture. We demonstrate the effectiveness of our
@@ -33,12 +45,13 @@ Tips:
`Write With Transformer <https://transformer.huggingface.co/doc/gpt>`__ is a webapp created and hosted by Hugging Face
showcasing the generative capabilities of several models. GPT is one of them.

The original code can be found `here <https://github.com/openai/finetune-transformer-lm>`__.
This model was contributed by `thomwolf <https://huggingface.co/thomwolf>`__. The original code can be found `here
<https://github.com/openai/finetune-transformer-lm>`__.

Note:

If you want to reproduce the original tokenization process of the `OpenAI GPT` paper, you will need to install ``ftfy``
and ``SpaCy``::
and ``SpaCy``:

.. code-block:: bash

@@ -126,3 +139,9 @@ TFOpenAIGPTDoubleHeadsModel

.. autoclass:: transformers.TFOpenAIGPTDoubleHeadsModel
    :members: call

TFOpenAIGPTForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFOpenAIGPTForSequenceClassification
    :members: call

@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

OpenAI GPT2
-----------------------------------------------------------------------------------------------------------------------

@@ -33,7 +45,8 @@ Tips:
Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five
different sizes: small, medium, large, xl and a distilled version of the small checkpoint: `distilgpt-2`.

The original code can be found `here <https://openai.com/blog/better-language-models/>`__.
This model was contributed by `thomwolf <https://huggingface.co/thomwolf>`__. The original code can be found `here
<https://openai.com/blog/better-language-models/>`__.


GPT2Config
@@ -71,14 +84,14 @@ GPT2Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2Model
    :members: forward
    :members: forward, parallelize, deparallelize


GPT2LMHeadModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2LMHeadModel
    :members: forward
    :members: forward, parallelize, deparallelize
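
The newly documented ``parallelize`` and ``deparallelize`` methods spread the model's transformer blocks over several
GPUs. A minimal sketch of the intended call pattern (the two-GPU layer split below is an illustrative assumption, not a
prescribed mapping):

.. code-block::

    from transformers import GPT2LMHeadModel

    model = GPT2LMHeadModel.from_pretrained("gpt2-xl")

    # gpt2-xl has 48 transformer blocks; here the first half goes to GPU 0, the rest to GPU 1
    device_map = {
        0: list(range(0, 24)),
        1: list(range(24, 48)),
    }
    model.parallelize(device_map)  # shards the blocks across the listed devices

    # ... run training or inference as usual ...

    model.deparallelize()  # moves the model back off the GPUs and frees their memory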


GPT2DoubleHeadsModel
@@ -114,3 +127,15 @@ TFGPT2DoubleHeadsModel

.. autoclass:: transformers.TFGPT2DoubleHeadsModel
    :members: call

TFGPT2ForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFGPT2ForSequenceClassification
    :members: call

TFSequenceClassifierOutputWithPast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.modeling_tf_outputs.TFSequenceClassifierOutputWithPast
    :members:

docs/source/model_doc/gpt_neo.rst (new file, 67 lines)
@@ -0,0 +1,67 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

GPT Neo
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The GPTNeo model was released in the `EleutherAI/gpt-neo <https://github.com/EleutherAI/gpt-neo>`__ repository by Sid
Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. It is a GPT2 like causal language model trained on the
`Pile <https://pile.eleuther.ai/>`__ dataset.

The architecture is similar to GPT2 except that GPT Neo uses local attention in every other layer with a window size of
256 tokens.

This model was contributed by `valhalla <https://huggingface.co/valhalla>`__.

Generation
_______________________________________________________________________________________________________________________

The :obj:`generate()` method can be used to generate text using the GPT Neo model.

.. code-block::

    >>> from transformers import GPTNeoForCausalLM, GPT2Tokenizer
    >>> model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
    >>> tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

    >>> prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
    ...          "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
    ...          "researchers was the fact that the unicorns spoke perfect English."

    >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    >>> gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
    >>> gen_text = tokenizer.batch_decode(gen_tokens)[0]


GPTNeoConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPTNeoConfig
    :members:


GPTNeoModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPTNeoModel
    :members: forward


GPTNeoForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPTNeoForCausalLM
    :members: forward

docs/source/model_doc/herbert.rst (new file, 73 lines)
@@ -0,0 +1,73 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

herBERT
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The herBERT model was proposed in `KLEJ: Comprehensive Benchmark for Polish Language Understanding
<https://www.aclweb.org/anthology/2020.acl-main.111.pdf>`__ by Piotr Rybak, Robert Mroczkowski, Janusz Tracz, and
Ireneusz Gawlik. It is a BERT-based language model trained on Polish corpora using only an MLM objective with dynamic
masking of whole words.

The abstract from the paper is the following:

*In recent years, a series of Transformer-based models unlocked major improvements in general natural language
understanding (NLU) tasks. Such a fast pace of research would not be possible without general NLU benchmarks, which
allow for a fair comparison of the proposed methods. However, such benchmarks are available only for a handful of
languages. To alleviate this issue, we introduce a comprehensive multi-task benchmark for the Polish language
understanding, accompanied by an online leaderboard. It consists of a diverse set of tasks, adopted from existing
datasets for named entity recognition, question-answering, textual entailment, and others. We also introduce a new
sentiment analysis task for the e-commerce domain, named Allegro Reviews (AR). To ensure a common evaluation scheme and
promote models that generalize to different NLU tasks, the benchmark includes datasets from varying domains and
applications. Additionally, we release HerBERT, a Transformer-based model trained specifically for the Polish language,
which has the best average performance and obtains the best results for three out of nine tasks. Finally, we provide an
extensive evaluation, including several standard baselines and recently proposed, multilingual Transformer-based
models.*

Examples of use:

.. code-block::

    >>> from transformers import HerbertTokenizer, RobertaModel

    >>> tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
    >>> model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1")

    >>> encoded_input = tokenizer.encode("Kto ma lepszą sztukę, ma lepszy rząd – to jasne.", return_tensors='pt')
    >>> outputs = model(encoded_input)

    >>> # HerBERT can also be loaded using AutoTokenizer and AutoModel:
    >>> import torch
    >>> from transformers import AutoModel, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
    >>> model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1")


This model was contributed by `rmroczkowski <https://huggingface.co/rmroczkowski>`__. The original code can be found
`here <https://github.com/allegro/HerBERT>`__.


HerbertTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.HerbertTokenizer
    :members:

HerbertTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.HerbertTokenizerFast
    :members:

docs/source/model_doc/ibert.rst (new file, 89 lines)
@@ -0,0 +1,89 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

I-BERT
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The I-BERT model was proposed in `I-BERT: Integer-only BERT Quantization <https://arxiv.org/abs/2101.01321>`__ by
Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It's a quantized version of RoBERTa running
inference up to four times faster.

The abstract from the paper is the following:

*Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language
Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for
efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this,
previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot
efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM
processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes
the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for
nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT
inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using
RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to
the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for
INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has
been open-sourced.*

This model was contributed by `kssteven <https://huggingface.co/kssteven>`__. The original code can be found `here
<https://github.com/kssteven418/I-BERT>`__.
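
Since I-BERT is RoBERTa-compatible, loading it looks like loading any other model. A minimal sketch (the
``kssteven/ibert-roberta-base`` checkpoint name is an assumption based on the contributor's namespace):

.. code-block::

    >>> from transformers import RobertaTokenizer, IBertModel

    >>> # I-BERT reuses the RoBERTa tokenizer and vocabulary
    >>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    >>> model = IBertModel.from_pretrained("kssteven/ibert-roberta-base")  # assumed checkpoint name

    >>> inputs = tokenizer("Hello world", return_tensors="pt")
    >>> outputs = model(**inputs)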


IBertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertConfig
    :members:


IBertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertModel
    :members: forward


IBertForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertForMaskedLM
    :members: forward


IBertForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertForSequenceClassification
    :members: forward


IBertForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertForMultipleChoice
    :members: forward


IBertForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertForTokenClassification
    :members: forward


IBertForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.IBertForQuestionAnswering
    :members: forward

@@ -1,34 +1,87 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

LayoutLM
-----------------------------------------------------------------------------------------------------------------------

.. _Overview:

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The LayoutLM model was proposed in the paper `LayoutLM: Pre-training of Text and Layout for Document Image
Understanding <https://arxiv.org/abs/1912.13318>`__ by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, and
Ming Zhou. It's a simple but effective pre-training method of text and layout for document image understanding and
information extraction tasks, such as form understanding and receipt understanding.
Ming Zhou. It's a simple but effective pretraining method of text and layout for document image understanding and
information extraction tasks, such as form understanding and receipt understanding. It obtains state-of-the-art results
on several downstream tasks:

- form understanding: the `FUNSD <https://guillaumejaume.github.io/FUNSD/>`__ dataset (a collection of 199 annotated
  forms comprising more than 30,000 words).
- receipt understanding: the `SROIE <https://rrc.cvc.uab.es/?ch=13>`__ dataset (a collection of 626 receipts for
  training and 347 receipts for testing).
- document image classification: the `RVL-CDIP <https://www.cs.cmu.edu/~aharley/rvl-cdip/>`__ dataset (a collection of
  400,000 images belonging to one of 16 classes).

The abstract from the paper is the following:

*Pre-training techniques have been verified successfully in a variety of NLP tasks in recent years. Despite the
widespread use of pre-training models for NLP applications, they almost exclusively focus on text-level manipulation,
widespread use of pretraining models for NLP applications, they almost exclusively focus on text-level manipulation,
while neglecting layout and style information that is vital for document image understanding. In this paper, we propose
the \textbf{LayoutLM} to jointly model interactions between text and layout information across scanned document images,
which is beneficial for a great number of real-world document image understanding tasks such as information extraction
from scanned documents. Furthermore, we also leverage image features to incorporate words' visual information into
LayoutLM. To the best of our knowledge, this is the first time that text and layout are jointly learned in a single
framework for document-level pre-training. It achieves new state-of-the-art results in several downstream tasks,
including form understanding (from 70.72 to 79.27), receipt understanding (from 94.02 to 95.24) and document image
classification (from 93.07 to 94.42).*
the LayoutLM to jointly model interactions between text and layout information across scanned document images, which is
beneficial for a great number of real-world document image understanding tasks such as information extraction from
scanned documents. Furthermore, we also leverage image features to incorporate words' visual information into LayoutLM.
To the best of our knowledge, this is the first time that text and layout are jointly learned in a single framework for
document-level pretraining. It achieves new state-of-the-art results in several downstream tasks, including form
understanding (from 70.72 to 79.27), receipt understanding (from 94.02 to 95.24) and document image classification
(from 93.07 to 94.42).*

Tips:

- LayoutLM has an extra input called :obj:`bbox`, which is the bounding boxes of the input tokens.
- The :obj:`bbox` input requires data on a 0-1000 scale, which means you should normalize the bounding boxes before
  passing them into the model.
- In addition to `input_ids`, :meth:`~transformer.LayoutLMModel.forward` also expects the input :obj:`bbox`, which are
  the bounding boxes (i.e. 2D-positions) of the input tokens. These can be obtained using an external OCR engine such
  as Google's `Tesseract <https://github.com/tesseract-ocr/tesseract>`__ (there's a `Python wrapper
  <https://pypi.org/project/pytesseract/>`__ available). Each bounding box should be in (x0, y0, x1, y1) format, where
  (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the
  position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000
  scale. To normalize, you can use the following function:

The original code can be found `here <https://github.com/microsoft/unilm/tree/master/layoutlm>`_.
.. code-block::

    def normalize_bbox(bbox, width, height):
        return [
            int(1000 * (bbox[0] / width)),
            int(1000 * (bbox[1] / height)),
            int(1000 * (bbox[2] / width)),
            int(1000 * (bbox[3] / height)),
        ]

Here, :obj:`width` and :obj:`height` correspond to the width and height of the original document in which the token
occurs. Those can be obtained using the Python Imaging Library (PIL), for example, as follows:

.. code-block::

    from PIL import Image

    image = Image.open("name_of_your_document - can be a png file, pdf, etc.")

    width, height = image.size

- For a demo which shows how to fine-tune :class:`LayoutLMForTokenClassification` on the `FUNSD dataset
  <https://guillaumejaume.github.io/FUNSD/>`__ (a collection of annotated forms), see `this notebook
  <https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb>`__.
  It includes an inference part, which shows how to use Google's Tesseract on a new document.

This model was contributed by `liminghao1630 <https://huggingface.co/liminghao1630>`__. The original code can be found
`here <https://github.com/microsoft/unilm/tree/master/layoutlm>`_.


LayoutLMConfig
@@ -45,6 +98,13 @@ LayoutLMTokenizer
    :members:


LayoutLMTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LayoutLMTokenizerFast
    :members:


LayoutLMModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -59,8 +119,43 @@ LayoutLMForMaskedLM
    :members:


LayoutLMForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LayoutLMForSequenceClassification
    :members:


LayoutLMForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LayoutLMForTokenClassification
    :members:


TFLayoutLMModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFLayoutLMModel
    :members:


TFLayoutLMForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFLayoutLMForMaskedLM
    :members:


TFLayoutLMForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFLayoutLMForSequenceClassification
    :members:


TFLayoutLMForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFLayoutLMForTokenClassification
    :members:

docs/source/model_doc/led.rst (new file, 150 lines)
@@ -0,0 +1,150 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

LED
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The LED model was proposed in `Longformer: The Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz
Beltagy, Matthew E. Peters, Arman Cohan.

The abstract from the paper is the following:

*Transformer-based models are unable to process long sequences due to their self-attention operation, which scales
quadratically with the sequence length. To address this limitation, we introduce the Longformer with an attention
mechanism that scales linearly with sequence length, making it easy to process documents of thousands of tokens or
longer. Longformer's attention mechanism is a drop-in replacement for the standard self-attention and combines a local
windowed attention with a task motivated global attention. Following prior work on long-sequence transformers, we
evaluate Longformer on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In
contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream tasks. Our
pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on
WikiHop and TriviaQA. We finally introduce the Longformer-Encoder-Decoder (LED), a Longformer variant for supporting
long document generative sequence-to-sequence tasks, and demonstrate its effectiveness on the arXiv summarization
dataset.*

Tips:

- :class:`~transformers.LEDForConditionalGeneration` is an extension of
  :class:`~transformers.BartForConditionalGeneration` exchanging the traditional *self-attention* layer with
  *Longformer*'s *chunked self-attention* layer. :class:`~transformers.LEDTokenizer` is an alias of
  :class:`~transformers.BartTokenizer`.
- LED works very well on long-range *sequence-to-sequence* tasks where the ``input_ids`` largely exceed a length of
  1024 tokens.
- LED pads the ``input_ids`` to be a multiple of ``config.attention_window`` if required. Therefore, a small speed-up
  is gained when :class:`~transformers.LEDTokenizer` is used with the ``pad_to_multiple_of`` argument.
- LED makes use of *global attention* by means of the ``global_attention_mask`` (see
  :class:`~transformers.LongformerModel`). For summarization, it is advised to put *global attention* only on the first
  ``<s>`` token. For question answering, it is advised to put *global attention* on all tokens of the question (see the
  sketch after this list).
- To fine-tune LED on all 16384 input tokens, it is necessary to enable *gradient checkpointing* by setting
  ``config.gradient_checkpointing = True``.
- A notebook showing how to evaluate LED can be accessed `here
  <https://colab.research.google.com/drive/12INTTR6n64TzS4RrXZxMSXfrOd9Xzamo?usp=sharing>`__.
- A notebook showing how to fine-tune LED can be accessed `here
  <https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing>`__.

This model was contributed by `patrickvonplaten <https://huggingface.co/patrickvonplaten>`__.
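
As an illustration of the *global attention* tip above, here is a minimal summarization sketch (the toy input string is
a placeholder for a long document):

.. code-block::

    >>> import torch
    >>> from transformers import LEDTokenizer, LEDForConditionalGeneration

    >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    >>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

    >>> inputs = tokenizer("Replace this with a long document.", return_tensors="pt")

    >>> # global attention on the first (<s>) token only, local windowed attention everywhere else
    >>> global_attention_mask = torch.zeros_like(inputs.input_ids)
    >>> global_attention_mask[:, 0] = 1

    >>> summary_ids = model.generate(inputs.input_ids, global_attention_mask=global_attention_mask, max_length=64)
    >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))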


LEDConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDConfig
    :members:


LEDTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDTokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary


LEDTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDTokenizerFast
    :members:


LED specific outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.models.led.modeling_led.LEDEncoderBaseModelOutput
    :members:

.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqModelOutput
    :members:

.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqLMOutput
    :members:

.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqSequenceClassifierOutput
    :members:

.. autoclass:: transformers.models.led.modeling_led.LEDSeq2SeqQuestionAnsweringModelOutput
    :members:

.. autoclass:: transformers.models.led.modeling_tf_led.TFLEDEncoderBaseModelOutput
    :members:

.. autoclass:: transformers.models.led.modeling_tf_led.TFLEDSeq2SeqModelOutput
    :members:

.. autoclass:: transformers.models.led.modeling_tf_led.TFLEDSeq2SeqLMOutput
    :members:


LEDModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDModel
    :members: forward


LEDForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDForConditionalGeneration
    :members: forward


LEDForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDForSequenceClassification
    :members: forward


LEDForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LEDForQuestionAnswering
    :members: forward


TFLEDModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFLEDModel
    :members: call


TFLEDForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFLEDForConditionalGeneration
    :members: call
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

Longformer
-----------------------------------------------------------------------------------------------------------------------

@@ -22,7 +34,14 @@ contrast to most prior work, we also pretrain Longformer and finetune it on a va
pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new state-of-the-art results on
WikiHop and TriviaQA.*

The Authors' code can be found `here <https://github.com/allenai/longformer>`__.
Tips:

- Since the Longformer is based on RoBERTa, it doesn't have :obj:`token_type_ids`. You don't need to indicate which
  token belongs to which segment. Just separate your segments with the separation token :obj:`tokenizer.sep_token` (or
  :obj:`</s>`). See the example below.

This model was contributed by `beltagy <https://huggingface.co/beltagy>`__. The Authors' code can be found `here
<https://github.com/allenai/longformer>`__.
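
To illustrate the tip above, a minimal sketch of encoding a pair of segments (the checkpoint name is the commonly used
one and the decoded string in the comment reflects RoBERTa-style pair encoding):

.. code-block::

    >>> from transformers import LongformerTokenizer

    >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")

    >>> # two segments are simply joined with separator tokens; no token_type_ids are needed
    >>> enc = tokenizer("First segment.", "Second segment.")
    >>> print(tokenizer.decode(enc.input_ids))  # <s>First segment.</s></s>Second segment.</s>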

Longformer Self Attention
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

docs/source/model_doc/luke.rst (new file, 159 lines)
@@ -0,0 +1,159 @@
..
    Copyright 2021 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

LUKE
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The LUKE model was proposed in `LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention
<https://arxiv.org/abs/2010.01057>`_ by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto.
It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps
improve performance on various downstream tasks involving reasoning about entities such as named entity recognition,
extractive and cloze-style question answering, entity typing, and relation classification.

The abstract from the paper is the following:

*Entity representations are useful in natural language tasks involving entities. In this paper, we propose new
pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed
model treats words and entities in a given text as independent tokens, and outputs contextualized representations of
them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves
predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also
propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the
transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model
achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains
state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification),
CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question
answering).*

Tips:

- This implementation is the same as :class:`~transformers.RobertaModel` with the addition of entity embeddings as well
  as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.
- LUKE treats entities as input tokens; therefore, it takes :obj:`entity_ids`, :obj:`entity_attention_mask`,
  :obj:`entity_token_type_ids` and :obj:`entity_position_ids` as extra input. You can obtain those using
  :class:`~transformers.LukeTokenizer`.
- :class:`~transformers.LukeTokenizer` takes :obj:`entities` and :obj:`entity_spans` (character-based start and end
  positions of the entities in the input text) as extra input. :obj:`entities` typically consist of [MASK] entities or
  Wikipedia entities. A brief description of when to input these entities is as follows:

  - *Inputting [MASK] entities to compute entity representations*: The [MASK] entity is used to mask entities to be
    predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by
    gathering the information about the entity from the input text. Therefore, the [MASK] entity can be used to address
    downstream tasks requiring the information of entities in text such as entity typing, relation classification, and
    named entity recognition.
  - *Inputting Wikipedia entities to compute knowledge-enhanced token representations*: LUKE learns rich information
    (or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By
    using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in
    the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as
    question answering.

- There are three head models for the former use case:

  - :class:`~transformers.LukeForEntityClassification`, for tasks to classify a single entity in an input text such as
    entity typing, e.g. the `Open Entity dataset <https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html>`__.
    This model places a linear head on top of the output entity representation.
  - :class:`~transformers.LukeForEntityPairClassification`, for tasks to classify the relationship between two entities
    such as relation classification, e.g. the `TACRED dataset <https://nlp.stanford.edu/projects/tacred/>`__. This
    model places a linear head on top of the concatenated output representation of the pair of given entities.
  - :class:`~transformers.LukeForEntitySpanClassification`, for tasks to classify the sequence of entity spans, such as
    named entity recognition (NER). This model places a linear head on top of the output entity representations. You
    can address NER using this model by inputting all possible entity spans in the text to the model.

  :class:`~transformers.LukeTokenizer` has a ``task`` argument, which enables you to easily create an input to these
  head models by specifying ``task="entity_classification"``, ``task="entity_pair_classification"``, or
  ``task="entity_span_classification"``. Please refer to the example code of each head model; a minimal instantiation
  is sketched below.
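
For instance, a minimal sketch of instantiating the tokenizer for the relation classification head (reusing the
``studio-ousia/luke-large-finetuned-tacred`` checkpoint that also appears in the example further below):

.. code-block::

    >>> from transformers import LukeTokenizer

    >>> # the ``task`` argument makes the tokenizer prepare entity inputs for the matching head model
    >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred", task="entity_pair_classification")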

There are also 3 notebooks available, which showcase how you can reproduce the results as reported in the paper with
the HuggingFace implementation of LUKE. They can be found `here
<https://github.com/studio-ousia/luke/tree/master/notebooks>`__.

Example:

.. code-block::

    >>> from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification

    >>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
    >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")

    # Example 1: Computing the contextualized entity representation corresponding to the entity mention "Beyoncé"
    >>> text = "Beyoncé lives in Los Angeles."
    >>> entity_spans = [(0, 7)]  # character-based entity span corresponding to "Beyoncé"
    >>> inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> word_last_hidden_state = outputs.last_hidden_state
    >>> entity_last_hidden_state = outputs.entity_last_hidden_state

    # Example 2: Inputting Wikipedia entities to obtain enriched contextualized representations
    >>> entities = ["Beyoncé", "Los Angeles"]  # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
    >>> entity_spans = [(0, 7), (17, 28)]  # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
    >>> inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> word_last_hidden_state = outputs.last_hidden_state
    >>> entity_last_hidden_state = outputs.entity_last_hidden_state

    # Example 3: Classifying the relationship between two entities using LukeForEntityPairClassification head model
    >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
    >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
    >>> entity_spans = [(0, 7), (17, 28)]  # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
    >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    >>> predicted_class_idx = int(logits[0].argmax())
    >>> print("Predicted class:", model.config.id2label[predicted_class_idx])

This model was contributed by `ikuyamada <https://huggingface.co/ikuyamada>`__ and `nielsr
<https://huggingface.co/nielsr>`__. The original code can be found `here <https://github.com/studio-ousia/luke>`__.


LukeConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LukeConfig
    :members:


LukeTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LukeTokenizer
    :members: __call__, save_vocabulary


LukeModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LukeModel
    :members: forward


LukeForEntityClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LukeForEntityClassification
    :members: forward


LukeForEntityPairClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LukeForEntityPairClassification
    :members: forward


LukeForEntitySpanClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.LukeForEntitySpanClassification
    :members: forward
@@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

LXMERT
-----------------------------------------------------------------------------------------------------------------------

@@ -19,7 +31,7 @@ Encoder Representations from Transformers) framework to learn these vision-and-l
build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language
encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language
semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative
pre-training tasks: masked language modeling, masked object prediction (feature regression and label classification),
pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification),
cross-modality matching, and image question answering. These tasks help in learning both intra-modality and
cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art
results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our
@@ -40,7 +52,8 @@ Tips:
contains self-attention for each respective modality and cross-attention, only the cross attention is returned and
both self attention outputs are disregarded.

The original code can be found `here <https://github.com/airsplay/lxmert>`__.
This model was contributed by `eltoto1219 <https://huggingface.co/eltoto1219>`__. The original code can be found `here
<https://github.com/airsplay/lxmert>`__.


LxmertConfig

docs/source/model_doc/m2m_100.rst (new file, 130 lines)
@@ -0,0 +1,130 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

M2M100
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The M2M100 model was proposed in `Beyond English-Centric Multilingual Machine Translation
<https://arxiv.org/abs/2010.11125>`__ by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky,
Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy
Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.

The abstract from the paper is the following:

*Existing work in translation demonstrated the potential of massively multilingual machine translation by training a
single model able to translate between any pair of languages. However, much of this work is English-Centric by training
only on data which was translated from or to English. While this is supported by large sources of training data, it
does not reflect translation needs worldwide. In this work, we create a true Many-to-Many multilingual translation
model that can translate directly between any pair of 100 languages. We build and open source a training dataset that
covers thousands of language directions with supervised data, created through large-scale mining. Then, we explore how
to effectively increase model capacity through a combination of dense scaling and language-specific sparse parameters
to create high quality models. Our focus on non-English-Centric models brings gains of more than 10 BLEU when directly
translating between non-English directions while performing competitively to the best single systems of WMT. We
open-source our scripts so that others may reproduce the data, evaluation, and final M2M-100 model.*

This model was contributed by `valhalla <https://huggingface.co/valhalla>`__.


Training and Generation
_______________________________________________________________________________________________________________________

M2M100 is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation tasks. As the model is
multilingual, it expects the sequences in a certain format: a special language id token is used as a prefix in both the
source and target text. The source text format is :obj:`[lang_code] X [eos]`, where :obj:`lang_code` is the source
language id for source text and the target language id for target text, with :obj:`X` being the source or target text.

The :class:`~transformers.M2M100Tokenizer` depends on :obj:`sentencepiece`, so be sure to install it before running the
examples. To install :obj:`sentencepiece`, run ``pip install sentencepiece``.
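
The prefixing is easy to verify on the tokenizer output itself. A minimal sketch (the exact sentencepiece pieces in the
comment are illustrative, not guaranteed):

.. code-block:: python

    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
    ids = tokenizer("Life is like a box of chocolates.").input_ids
    # The first token is the language id token (e.g. "__en__") and the last one is </s>,
    # matching the [lang_code] X [eos] format described above.
    print(tokenizer.convert_ids_to_tokens(ids))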

- Supervised Training

.. code-block::

    from transformers import M2M100Config, M2M100ForConditionalGeneration, M2M100Tokenizer

    model = M2M100ForConditionalGeneration.from_pretrained('facebook/m2m100_418M')
    tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang="en", tgt_lang="fr")

    src_text = "Life is like a box of chocolates."
    tgt_text = "La vie est comme une boîte de chocolat."

    model_inputs = tokenizer(src_text, return_tensors="pt")
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(tgt_text, return_tensors="pt").input_ids

    loss = model(**model_inputs, labels=labels).loss  # forward pass

- Generation

M2M100 uses the :obj:`eos_token_id` as the :obj:`decoder_start_token_id` for generation, with the target language id
forced as the first generated token. To force the target language id as the first generated token, pass the
:obj:`forced_bos_token_id` parameter to the :obj:`generate` method. The following example shows how to translate from
Hindi to French and from Chinese to English using the ``facebook/m2m100_418M`` checkpoint.

.. code-block::

    >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    >>> hi_text = "जीवन एक चॉकलेट बॉक्स की तरह है।"
    >>> chinese_text = "生活就像一盒巧克力。"

    >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    >>> # translate Hindi to French
    >>> tokenizer.src_lang = "hi"
    >>> encoded_hi = tokenizer(hi_text, return_tensors="pt")
    >>> generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    ["La vie est comme une boîte de chocolat."]

    >>> # translate Chinese to English
    >>> tokenizer.src_lang = "zh"
    >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt")
    >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en"))
    >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    ["Life is like a box of chocolate."]


M2M100Config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.M2M100Config
    :members:


M2M100Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.M2M100Tokenizer
    :members: build_inputs_with_special_tokens, get_special_tokens_mask,
        create_token_type_ids_from_sequences, save_vocabulary


M2M100Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.M2M100Model
    :members: forward


M2M100ForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.M2M100ForConditionalGeneration
    :members: forward

@ -1,3 +1,15 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

MarianMT
-----------------------------------------------------------------------------------------------------------------------

@ -21,11 +33,11 @@ Implementation Notes
- The modeling code is the same as :class:`~transformers.BartForConditionalGeneration` with a few minor modifications:

  - static (sinusoid) positional embeddings (:obj:`MarianConfig.static_position_embeddings=True`)
  - a new final_logits_bias (:obj:`MarianConfig.add_bias_logits=True`)
  - no layernorm_embedding (:obj:`MarianConfig.normalize_embedding=False`)
  - the model starts generating with :obj:`pad_token_id` (which has 0 as a token_embedding) as the prefix (Bart uses
    :obj:`</s>`)

- Code to bulk convert models can be found in ``convert_marian_to_pytorch.py``.
- This model was contributed by `sshleifer <https://huggingface.co/sshleifer>`__.

Naming
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -44,12 +56,10 @@ Examples

- Since Marian models are smaller than many other translation models available in the library, they can be useful for
  fine-tuning experiments and integration tests.
- `Fine-tune on GPU
  <https://github.com/huggingface/transformers/blob/master/examples/research_projects/seq2seq-distillation/train_distil_marian_enro_teacher.sh>`__
- `Fine-tune on GPU with pytorch-lightning
  <https://github.com/huggingface/transformers/blob/master/examples/research_projects/seq2seq-distillation/train_distil_marian_no_teacher.sh>`__

Multilingual Models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -67,27 +77,29 @@ require 3 character language codes:

.. code-block:: python

    >>> from transformers import MarianMTModel, MarianTokenizer
    >>> src_text = [
    ...     '>>fra<< this is a sentence in english that we want to translate to french',
    ...     '>>por<< This should go to portuguese',
    ...     '>>esp<< And this to Spanish'
    ... ]

    >>> model_name = 'Helsinki-NLP/opus-mt-en-roa'
    >>> tokenizer = MarianTokenizer.from_pretrained(model_name)
    >>> print(tokenizer.supported_language_codes)
    ['>>zlm_Latn<<', '>>mfe<<', '>>hat<<', '>>pap<<', '>>ast<<', '>>cat<<', '>>ind<<', '>>glg<<', '>>wln<<', '>>spa<<', '>>fra<<', '>>ron<<', '>>por<<', '>>ita<<', '>>oci<<', '>>arg<<', '>>min<<']

    >>> model = MarianMTModel.from_pretrained(model_name)
    >>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
    >>> [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
    ["c'est une phrase en anglais que nous voulons traduire en français",
     'Isto deve ir para o português.',
     'Y esto al español']


Here is the code to see all available pretrained models on the hub:

.. code-block:: python
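    # The body of this block is elided in the diff; what follows is a minimal
    # sketch (an assumption, not the original file's code) using the
    # huggingface_hub client to list Marian checkpoints under Helsinki-NLP.
    from huggingface_hub import HfApi

    model_ids = [m.modelId for m in HfApi().list_models(author="Helsinki-NLP")]
    print(len(model_ids), model_ids[:5])
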
@ -138,21 +150,22 @@ Example of translating english to many romance languages, using old-style 2 char

.. code-block:: python

    >>> from transformers import MarianMTModel, MarianTokenizer
    >>> src_text = [
    ...     '>>fr<< this is a sentence in english that we want to translate to french',
    ...     '>>pt<< This should go to portuguese',
    ...     '>>es<< And this to Spanish'
    ... ]

    >>> model_name = 'Helsinki-NLP/opus-mt-en-ROMANCE'
    >>> tokenizer = MarianTokenizer.from_pretrained(model_name)

    >>> model = MarianMTModel.from_pretrained(model_name)
    >>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
    >>> [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
    ["c'est une phrase en anglais que nous voulons traduire en français",
     'Isto deve ir para o português.',
     'Y esto al español']

@ -167,16 +180,39 @@ MarianTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MarianTokenizer
    :members: as_target_tokenizer


MarianModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MarianModel
    :members: forward


MarianMTModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MarianMTModel
    :members: forward


MarianForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MarianForCausalLM
    :members: forward


TFMarianModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFMarianModel
    :members: call


TFMarianMTModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFMarianMTModel
    :members: call

@ -1,11 +1,23 @@
..
    Copyright 2020 The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

MBart and MBart-50
-----------------------------------------------------------------------------------------------------------------------

**DISCLAIMER:** If you see something strange, file a `Github Issue
<https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title>`__ and assign
@patrickvonplaten

Overview of MBart
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The MBart model was presented in `Multilingual Denoising Pre-training for Neural Machine Translation
@ -13,39 +25,41 @@ The MBart model was presented in `Multilingual Denoising Pre-training for Neural
Ghazvininejad, Mike Lewis, Luke Zettlemoyer.

According to the abstract, MBART is a sequence-to-sequence denoising auto-encoder pretrained on large-scale monolingual
corpora in many languages using the BART objective. mBART is one of the first methods for pretraining a complete
sequence-to-sequence model by denoising full texts in multiple languages, while previous approaches have focused only
on the encoder, decoder, or reconstructing parts of the text.

This model was contributed by `valhalla <https://huggingface.co/valhalla>`__. The Authors' code can be found `here
<https://github.com/pytorch/fairseq/tree/master/examples/mbart>`__.

Training of MBart
_______________________________________________________________________________________________________________________

MBart is a multilingual encoder-decoder (sequence-to-sequence) model primarily intended for translation tasks. As the
model is multilingual it expects the sequences in a different format. A special language id token is added in both the
source and target text. The source text format is :obj:`X [eos, src_lang_code]` where :obj:`X` is the source text. The
target text format is :obj:`[tgt_lang_code] X [eos]`. :obj:`bos` is never used.

The regular :meth:`~transformers.MBartTokenizer.__call__` will encode the source text format, and it should be wrapped
inside the context manager :meth:`~transformers.MBartTokenizer.as_target_tokenizer` to encode the target text format.

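The suffix format is easy to verify directly on the tokenizer output; the sketch below is illustrative (the exact
pieces printed depend on the sentencepiece vocabulary):

.. code-block:: python

    from transformers import MBartTokenizer

    tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
    ids = tokenizer("UN Chief Says There Is No Military Solution in Syria").input_ids
    # The language code comes after </s>, matching the X [eos, src_lang_code] format.
    print(tokenizer.convert_ids_to_tokens(ids))
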
- Supervised training

.. code-block::

    >>> from transformers import MBartForConditionalGeneration, MBartTokenizer

    >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    >>> example_english_phrase = "UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"

    >>> inputs = tokenizer(example_english_phrase, return_tensors="pt")
    >>> with tokenizer.as_target_tokenizer():
    ...     labels = tokenizer(expected_translation_romanian, return_tensors="pt")

    >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
    >>> # forward pass
    >>> model(**inputs, labels=labels["input_ids"])

- Generation

@ -54,14 +68,95 @@ the sequences for sequence-to-sequence fine-tuning.

.. code-block::

    >>> from transformers import MBartForConditionalGeneration, MBartTokenizer

    >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
    >>> article = "UN Chief Says There Is No Military Solution in Syria"
    >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
    >>> inputs = tokenizer(article, return_tensors="pt")
    >>> translated_tokens = model.generate(**inputs, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
    >>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
    "Şeful ONU declară că nu există o soluţie militară în Siria"


Overview of MBart-50
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

MBart-50 was introduced in the `Multilingual Translation with Extensible Multilingual Pretraining and Finetuning
<https://arxiv.org/abs/2008.00401>`__ paper by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav
Chaudhary, Jiatao Gu, Angela Fan. MBart-50 is created using the original ``mbart-large-cc25`` checkpoint by extending
its embedding layers with randomly initialized vectors for an extra set of 25 language tokens and then pretraining on
50 languages.

According to the abstract:

*Multilingual translation models can be created through multilingual finetuning. Instead of finetuning on one
direction, a pretrained model is finetuned on many directions at the same time. It demonstrates that pretrained models
can be extended to incorporate additional languages without loss of performance. Multilingual finetuning improves on
average 1 BLEU over the strongest baselines (being either multilingual from scratch or bilingual finetuning) while
improving 9.3 BLEU on average over bilingual baselines from scratch.*


Training of MBart-50
_______________________________________________________________________________________________________________________

The text format for MBart-50 is slightly different from mBART. For MBart-50 the language id token is used as a prefix
for both source and target text, i.e. the text format is :obj:`[lang_code] X [eos]`, where :obj:`lang_code` is the
source language id for source text and the target language id for target text, with :obj:`X` being the source or
target text respectively.

MBart-50 has its own tokenizer: :class:`~transformers.MBart50Tokenizer`.

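The prefix format can again be checked on the tokenizer output; a small illustrative sketch (exact pieces not
guaranteed):

.. code-block:: python

    from transformers import MBart50Tokenizer

    tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")
    ids = tokenizer("UN Chief Says There Is No Military Solution in Syria").input_ids
    # Unlike mBART, the language code now comes first, matching [lang_code] X [eos].
    print(tokenizer.convert_ids_to_tokens(ids))
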
- Supervised training

.. code-block::

    from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50")
    tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")

    src_text = " UN Chief Says There Is No Military Solution in Syria"
    tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"

    model_inputs = tokenizer(src_text, return_tensors="pt")
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(tgt_text, return_tensors="pt").input_ids

    model(**model_inputs, labels=labels)  # forward pass

- Generation

To generate using the mBART-50 multilingual translation models, :obj:`eos_token_id` is used as the
:obj:`decoder_start_token_id` and the target language id is forced as the first generated token. To force the
target language id as the first generated token, pass the :obj:`forced_bos_token_id` parameter to the :obj:`generate`
method. The following example shows how to translate from Hindi to French and from Arabic to English using the
``facebook/mbart-large-50-many-to-many-mmt`` checkpoint.

.. code-block::

    from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

    article_hi = "संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है"
    article_ar = "الأمين العام للأمم المتحدة يقول إنه لا يوجد حل عسكري في سوريا."

    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
    tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")

    # translate Hindi to French
    tokenizer.src_lang = "hi_IN"
    encoded_hi = tokenizer(article_hi, return_tensors="pt")
    generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"])
    tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    # => "Le chef de l 'ONU affirme qu 'il n 'y a pas de solution militaire en Syria."

    # translate Arabic to English
    tokenizer.src_lang = "ar_AR"
    encoded_ar = tokenizer(article_ar, return_tensors="pt")
    generated_tokens = model.generate(**encoded_ar, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
    tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    # => "The Secretary-General of the United Nations says there is no military solution in Syria."


MBartConfig
@ -75,7 +170,35 @@ MBartTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBartTokenizer
    :members: as_target_tokenizer, build_inputs_with_special_tokens


MBartTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBartTokenizerFast
    :members:


MBart50Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBart50Tokenizer
    :members:


MBart50TokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBart50TokenizerFast
    :members:


MBartModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBartModel
    :members:


MBartForConditionalGeneration

@ -85,8 +208,35 @@ MBartForConditionalGeneration
    :members:


MBartForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBartForQuestionAnswering
    :members:


MBartForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBartForSequenceClassification
    :members:


MBartForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MBartForCausalLM
    :members: forward


TFMBartModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFMBartModel
    :members: call


TFMBartForConditionalGeneration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFMBartForConditionalGeneration
    :members: call

154 docs/source/model_doc/megatron_bert.rst (new file)
@ -0,0 +1,154 @@
..
    Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.

MegatronBERT
-----------------------------------------------------------------------------------------------------------------------

Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The MegatronBERT model was proposed in `Megatron-LM: Training Multi-Billion Parameter Language Models Using Model
Parallelism <https://arxiv.org/abs/1909.08053>`__ by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley,
Jared Casper and Bryan Catanzaro.

The abstract from the paper is the following:

*Recent work in language modeling demonstrates that training large transformer models advances the state of the art in
Natural Language Processing applications. However, very large models can be quite difficult to train due to memory
constraints. In this work, we present our techniques for training very large transformer models and implement a simple,
efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our
approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model
parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We
illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain
15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline
that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance
the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9
billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in
BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we
achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA
accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy
of 89.4%).*

Tips:

We have provided pretrained `BERT-345M <https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m>`__ checkpoints
to use for evaluating or finetuning downstream tasks.

To access these checkpoints, first `sign up <https://ngc.nvidia.com/signup>`__ for and set up the NVIDIA GPU Cloud
(NGC) Registry CLI. Further documentation for downloading models can be found in the `NGC documentation
<https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1>`__.

Alternatively, you can directly download the checkpoints:

BERT-345M-uncased:

.. code-block:: bash

    wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip -O megatron_bert_345m_v0_1_uncased.zip

BERT-345M-cased:

.. code-block:: bash

    wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O megatron_bert_345m_v0_1_cased.zip

Once you have obtained the checkpoints from NVIDIA GPU Cloud (NGC), you have to convert them to a format that will
easily be loaded by Hugging Face Transformers and our port of the BERT code.

The following commands allow you to do the conversion. We assume that the folder ``models/megatron_bert`` contains
``megatron_bert_345m_v0_1_{cased, uncased}.zip`` and that the commands are run from inside that folder:

.. code-block:: bash

    python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_uncased.zip

.. code-block:: bash

    python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip

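After conversion, the checkpoint loads like any local Transformers model. A minimal sketch, with assumptions: the
output directory name below is hypothetical (point it at whatever folder the conversion script produced), and the
uncased BERT wordpiece vocabulary is assumed for tokenization.

.. code-block:: python

    from transformers import BertTokenizer, MegatronBertForMaskedLM

    # Hypothetical path: use the folder written by the conversion script above.
    model = MegatronBertForMaskedLM.from_pretrained("./megatron_bert_345m_v0_1_uncased")
    # Assumption: the 345M uncased checkpoint shares BERT's uncased vocabulary.
    tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
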
This model was contributed by `jdemouth <https://huggingface.co/jdemouth>`__. The original code can be found `here
<https://github.com/NVIDIA/Megatron-LM>`__. That repository contains a multi-GPU and multi-node implementation of the
Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and
"pipeline parallel" techniques.

MegatronBertConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertConfig
    :members:


MegatronBertModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertModel
    :members: forward


MegatronBertForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForMaskedLM
    :members: forward


MegatronBertForCausalLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForCausalLM
    :members: forward


MegatronBertForNextSentencePrediction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForNextSentencePrediction
    :members: forward


MegatronBertForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForPreTraining
    :members: forward


MegatronBertForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForSequenceClassification
    :members: forward


MegatronBertForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForMultipleChoice
    :members: forward


MegatronBertForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForTokenClassification
    :members: forward


MegatronBertForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.MegatronBertForQuestionAnswering
    :members: forward

Some files were not shown because too many files have changed in this diff.