Compare commits
	
		
971 Commits

| SHA1 | Author | Date |
|---|---|---|
| 1d646badbb | |||
| 9676d1a2a8 | |||
| 8349d75773 | |||
| fb056494e5 | |||
| 36f592cc82 | |||
| ad4a393e2e | |||
| c4ac7a76db | |||
| 4acd87ff4e | |||
| cf5c5c9e1c | |||
| 4dde31cb76 | |||
| 17ea43cf98 | |||
| 80bf868a26 | |||
| 481d9c4fb5 | |||
| 4ddc31ff40 | |||
| f47f7f4611 | |||
| 9fabc0b6a9 | |||
| 31c23bd5ee | |||
| 2f071fcb02 | |||
| 5705333441 | |||
| f2a337b3ed | |||
| 4a233e5b2c | |||
| 7a99e4b196 | |||
| 7c9f8f93f9 | |||
| d6dde438ea | |||
| 4a21c4d88d | |||
| 2967de06f4 | |||
| a6bcfb8015 | |||
| 78863f6b36 | |||
| 8a618e0af5 | |||
| 3b7fb48c3b | |||
| a049c8043b | |||
| 5def3302f4 | |||
| f71758f7a4 | |||
| 0f091062d4 | |||
| c4acc3a8e9 | |||
| e8e956dbb2 | |||
| e4022d96f7 | |||
| 1761d2091a | |||
| 789ea72037 | |||
| 1cbd566c63 | |||
| 743e383d4b | |||
| 99a90e43d4 | |||
| b5ec526f85 | |||
| a6981076ec | |||
| 0b82e3d0d9 | |||
| f09e5ecef0 | |||
| 128bdd4c35 | |||
| 72402d1acd | |||
| 28a30af6d1 | |||
| de203853cc | |||
| 559790f9e4 | |||
| b3087ddde8 | |||
| 4761a39781 | |||
| 45a6f2edd9 | |||
| e7ba5bc85b | |||
| d340e2329e | |||
| b94f73bab7 | |||
| 9678c49419 | |||
| f3d1511b5b | |||
| dd2d90f344 | |||
| ee261439a9 | |||
| 29bb3e4eb0 | |||
| f5397ffc3b | |||
| 271f213621 | |||
| cf9c1cbb60 | |||
| 2167e366ba | |||
| e9a103c17a | |||
| c832f43a4d | |||
| 3927d7756c | |||
| 0ea82b246f | |||
| 9d44236f70 | |||
| a7e01a248b | |||
| 8ba44ced95 | |||
| 2b11fa5174 | |||
| 6448396d54 | |||
| 1e47dee24c | |||
| c9591f6fac | |||
| 798da627eb | |||
| c014d1f0c6 | |||
| 0b22e47a40 | |||
| 830d212be7 | |||
| 7c0f2d0a6a | |||
| a31e591d27 | |||
| 447de34dde | |||
| 98dd19b96b | |||
| 68a3e0223a | |||
| a2d4950f5c | |||
| 9f995b99d4 | |||
| 3fe5c8e8a8 | |||
| 354944e607 | |||
| ab984a8b72 | |||
| 3df208c93a | |||
| 66ea76b8a9 | |||
| 60414f31a9 | |||
| baa74326ab | |||
| c10c7d59e7 | |||
| bf503158c5 | |||
| 8cba057260 | |||
| 6393261e41 | |||
| dcc9bb3252 | |||
| af23b626c8 | |||
| c4d4f3ec8c | |||
| d572d7027b | |||
| de8e14b6c0 | |||
| 88368c2a16 | |||
| 2d8ec5a684 | |||
| 75635072e1 | |||
| 92a9976e91 | |||
| 59057abe52 | |||
| bac332fec0 | |||
| c3df2136e1 | |||
| e391d4735e | |||
| 119610b5c5 | |||
| 08e4ad5eea | |||
| 0d1dad6d53 | |||
| 8960988f35 | |||
| b57bfb5fa0 | |||
| 46ffc28329 | |||
| 15143fbad6 | |||
| 3cd6289758 | |||
| 36362cf086 | |||
| 3a527fa820 | |||
| 556442afb3 | |||
| 160b5d6080 | |||
| 26497d1199 | |||
| 6a083fd447 | |||
| f6969cc12b | |||
| e768f2322a | |||
| 8334993915 | |||
| 62760baf46 | |||
| 45de034bf8 | |||
| 5a81e79e25 | |||
| 5882c442e5 | |||
| a9debaca3d | |||
| c88f05163d | |||
| 982f181aa7 | |||
| 84b9d1c423 | |||
| 603b470a3d | |||
| 4812a5a767 | |||
| 4b956b2a6b | |||
| b97af8cce9 | |||
| 65c49bb27e | |||
| 39c38b2ea0 | |||
| dcddf498c8 | |||
| d3a3a0353c | |||
| a84adddd1b | |||
| 32e1332acf | |||
| b62abe87c9 | |||
| 969d3ae95e | |||
| 646711e1e2 | |||
| 4356f791a2 | |||
| 11ac4b9555 | |||
| 8bdee1cb73 | |||
| 7424b2848f | |||
| 364920e216 | |||
| 23c23f5399 | |||
| 99a54ac51c | |||
| 439b37b474 | |||
| f2cf6ce4a9 | |||
| 465870c33f | |||
| 16b6361792 | |||
| 32aabe8c33 | |||
| 2c177a87eb | |||
| f851fb55ca | |||
| eab980fd68 | |||
| a95ced6260 | |||
| 50c6bc4195 | |||
| 4b082bd4d8 | |||
| e5df36397b | |||
| 0537139b2b | |||
| 84d346b687 | |||
| 3f05de6dde | |||
| 33cb00f41a | |||
| 78b2a53f10 | |||
| 6b3438df21 | |||
| e360037236 | |||
| b7175a2701 | |||
| 995e38b7af | |||
| 3401980fc4 | |||
| 728637356c | |||
| 34f28b2a13 | |||
| ad88563bda | |||
| 64d83c7ae0 | |||
| 01597e5b90 | |||
| f5c698b21a | |||
| 6dc4b6f34c | |||
| e30579f764 | |||
| 518307dfcd | |||
| 9d0a11a68c | |||
| 24a20483f5 | |||
| 6f152572cd | |||
| a4704b1263 | |||
| ad0ab9afe9 | |||
| 59fe641b8b | |||
| d68a8fe462 | |||
| 7ae642b72d | |||
| 69bff89935 | |||
| 1efb1f1660 | |||
| 1eb125fb95 | |||
| 3f91338be9 | |||
| f47f9a5874 | |||
| ee027c89f2 | |||
| e52737d5ad | |||
| 5e151f5e77 | |||
| 593c070435 | |||
| 5ac8b62265 | |||
| 5c6cac102b | |||
| ed717635ff | |||
| 04b50cabf6 | |||
| dddd6b9927 | |||
| f9453d15e5 | |||
| f7ee2e5d20 | |||
| d737947725 | |||
| 705237b4ec | |||
| 600a42329b | |||
| 04d2006f28 | |||
| 7f6a0c0d69 | |||
| 7c0baf9521 | |||
| 7775a3d2ed | |||
| 33dd59e971 | |||
| 5951d86024 | |||
| aa4c8804f2 | |||
| 134847db81 | |||
| 981f7f5253 | |||
| bffd17a43d | |||
| 85df4f7cca | |||
| 11fae9e636 | |||
| 121f88cae3 | |||
| d77abd4d08 | |||
| 2a667b1eb9 | |||
| 0be6a2a624 | |||
| 7fba47b7d9 | |||
| e25cba78cf | |||
| 38b79b5a63 | |||
| 0b52642d37 | |||
| 89fd3450a6 | |||
| 9fd6e7ab9f | |||
| a15562e170 | |||
| 0287d264e9 | |||
| 7f522437bc | |||
| 3fbf301bba | |||
| 2dcc5a1629 | |||
| 7b0c99add9 | |||
| 31d3373bc9 | |||
| fede4ef45d | |||
| b6cd856b08 | |||
| ff7368eb6b | |||
| 6ae0bb5291 | |||
| 819b468f70 | |||
| 58b59a0c31 | |||
| a1c34bd286 | |||
| ea86bef545 | |||
| e0f867a9ba | |||
| 11600edc6e | |||
| b6992b7b47 | |||
| bdb4409ed8 | |||
| 0c8e823b03 | |||
| 0cd283522a | |||
| c85b5db61a | |||
| 5c2b94c82a | |||
| 87747518e9 | |||
| 719cb3738d | |||
| fc1fbae45d | |||
| 42e00cf9e1 | |||
| d7a4c3252e | |||
| 7f006cdd87 | |||
| 0fd0b674e6 | |||
| b65a994f59 | |||
| 1d438f15b3 | |||
| 574c5b3a72 | |||
| 09363f2a8b | |||
| 51e980ce36 | |||
| 206c35e9a4 | |||
| f3d18c71ec | |||
| d483cd8e46 | |||
| d2f21f08f5 | |||
| 12b9cc9e26 | |||
| bfe93a5a21 | |||
| 256086bc69 | |||
| 80aa87d9a3 | |||
| 455a4c842c | |||
| 7a1f174a9d | |||
| c665e0fcfe | |||
| 9b6e3b34d9 | |||
| dec8f4d6fd | |||
| bc29aa67a9 | |||
| f35f612280 | |||
| 7ca9653852 | |||
| 25e8389439 | |||
| dc43215c01 | |||
| 282c276e09 | |||
| 803c1cc4ea | |||
| 7044ed6b05 | |||
| cd65c41a83 | |||
| 69da972ace | |||
| 88111de07c | |||
| b66e9b4433 | |||
| 0a2fecdf90 | |||
| 3871b8a107 | |||
| 8678ff8df5 | |||
| e0caab0cf0 | |||
| a600b30cc3 | |||
| 20c06fa37d | |||
| 39eb31e11e | |||
| 350bb6bffa | |||
| 82462c5cba | |||
| 41f35d0b3d | |||
| 01ad55f8cf | |||
| 50e615f43d | |||
| f8aace6bcd | |||
| 8faf2e086b | |||
| f7978490b2 | |||
| ce5ef4b35d | |||
| 5dd7b677ad | |||
| ca1a00a302 | |||
| 4e6a3172ce | |||
| fd10d79b55 | |||
| abe734ca1f | |||
| 0f5a799456 | |||
| d51f72d5de | |||
| 306af132d7 | |||
| 50e6daf83a | |||
| 0517e7a1cb | |||
| 6e1ac34e2b | |||
| 2fb9a934b4 | |||
| c8731b9583 | |||
| 6060b2f89b | |||
| 07e21307b6 | |||
| caf1d116a6 | |||
| e7fba4bef5 | |||
| fe8fb10b44 | |||
| 2a2832ce73 | |||
| 942d3f4b20 | |||
| bf3dc778b8 | |||
| 0a74c88ac6 | |||
| 5f297c7be3 | |||
| d9847678b3 | |||
| 0f8ad89206 | |||
| 9ce42dc540 | |||
| 1d15a7f278 | |||
| ed2ab1c220 | |||
| 0ecfd17f49 | |||
| 50792dbdcc | |||
| e7706f514b | |||
| b5eb283aaa | |||
| f753d4e32b | |||
| 75bc2a03cc | |||
| 1dc43e56c9 | |||
| 912a377e90 | |||
| c9bce1811c | |||
| 62df4ba59a | |||
| 4ce5f36f78 | |||
| ec4b1c659f | |||
| df52abe373 | |||
| 43c243254a | |||
| 3c7e676f8b | |||
| a5fe16687b | |||
| 497f73c964 | |||
| 93e82ab424 | |||
| 19b7c9b0b7 | |||
| fea921d382 | |||
| da1e4e53fc | |||
| 0d8f8848d5 | |||
| 7f2c384c80 | |||
| 4d16b279e5 | |||
| c513415b19 | |||
| 778a263f09 | |||
| 74d78beeb4 | |||
| 7f5d85347e | |||
| 906581ae3c | |||
| b247b0d880 | |||
| 780f183e55 | |||
| e424d2e45d | |||
| 1ae81e4aa1 | |||
| 5d29f8e99b | |||
| a8ad83040d | |||
| ca4baf8ca1 | |||
| 60c984da6c | |||
| 42968138c8 | |||
| 1d23240068 | |||
| d06c5a2a0a | |||
| edc5222fc3 | |||
| 9cf298dfc1 | |||
| 0d288727b8 | |||
| 447afe9cdf | |||
| a175a9dc01 | |||
| 53282b5bd0 | |||
| 26bda77225 | |||
| c8933bb2d9 | |||
| e08c01aa1a | |||
| 84a3a9689d | |||
| f68339639a | |||
| cb60ce59dd | |||
| 529a16dec6 | |||
| f1b018740c | |||
| e85123d398 | |||
| 06510ccb53 | |||
| 3bcbebd440 | |||
| 436ce07218 | |||
| ab7bd5ef98 | |||
| 47d6853439 | |||
| df9d6effae | |||
| 3f20dd7186 | |||
| e13465fb8b | |||
| c603d099aa | |||
| 2ba1a14fb0 | |||
| 90dcd8c05d | |||
| 57272d5ddf | |||
| b006a7a12f | |||
| 14eef67eb2 | |||
| 296df2b18c | |||
| 55f69a11b6 | |||
| 47267ba556 | |||
| 034aa0c2d7 | |||
| e00b4ff1de | |||
| 814a3f4e01 | |||
| 2f9397139d | |||
| d6bbcbc4cf | |||
| 6f877d9daf | |||
| 07681b6b58 | |||
| fdc487d8b3 | |||
| aa05dc8935 | |||
| e4515faf54 | |||
| 41789c6c3d | |||
| 260c86082d | |||
| d30cbaf5dc | |||
| 9beaa85b07 | |||
| e753f249e1 | |||
| 2d042274ac | |||
| 3bffd2e8e5 | |||
| c3619f5536 | |||
| 3b56427a1e | |||
| 43489756ad | |||
| a690edab17 | |||
| ad6e62cd82 | |||
| 388e3251fa | |||
| f5e2ed0fd8 | |||
| 562b998366 | |||
| bb04446285 | |||
| bfd75056b0 | |||
| fc74132598 | |||
| 933841d903 | |||
| 6d0aa73981 | |||
| b0b9b8091b | |||
| 53c8f700f4 | |||
| 901dde0e45 | |||
| e239a4a20f | |||
| fecaed0ed4 | |||
| d86b49ac86 | |||
| 45ab8bf60e | |||
| 97c30b73d5 | |||
| d5e60e5b7a | |||
| a1359b970c | |||
| 28f7ca1f80 | |||
| a368b87791 | |||
| f94f1c6016 | |||
| c589862b78 | |||
| 5a49b793d9 | |||
| 4270d3da1b | |||
| b8fde43868 | |||
| 40acf6b52a | |||
| 47e9aea0fe | |||
| 5582bc4b23 | |||
| 856a63da4d | |||
| 1ef41b8337 | |||
| 00e9c4cc96 | |||
| 189ff9b664 | |||
| e384ae2b9d | |||
| d8923270e6 | |||
| 5652f54ac2 | |||
| 7e7fc53da5 | |||
| 715534800a | |||
| 339e556feb | |||
| 5c18825a18 | |||
| 3e3e145497 | |||
| 47975ed53e | |||
| ab05280666 | |||
| b8ff56896c | |||
| 9d0029e215 | |||
| 83dba0b67b | |||
| e24e19ce3b | |||
| fe02e45e48 | |||
| 88efc65bac | |||
| 8308170156 | |||
| 572dcfd1db | |||
| c4ef103447 | |||
| 3d47a7f8ab | |||
| 9ce36e3e4b | |||
| 39f426be65 | |||
| baf08ca1d4 | |||
| 3d87991f60 | |||
| ba4bce2581 | |||
| 634a3172d8 | |||
| 22ac004a7c | |||
| 912fdff899 | |||
| b3d83d68db | |||
| a7b4cfe919 | |||
| b219029c45 | |||
| aaedfc35a8 | |||
| c683c3d5a5 | |||
| 7060766490 | |||
| 75d5f98fd2 | |||
| 14e970c271 | |||
| 3566d27919 | |||
| fbd746bd06 | |||
| 6c41a8f5dc | |||
| e367ac469c | |||
| 9d0603148b | |||
| f2b300df6b | |||
| 7df303f5ad | |||
| d2cc6b101e | |||
| 39d72bcc7b | |||
| 770043eea2 | |||
| 7729ef7381 | |||
| 5c6ecf37e7 | |||
| b4f9464f90 | |||
| 822d6768eb | |||
| 7e6102ce74 | |||
| 3773ba44f0 | |||
| a80aa03bda | |||
| a6f412da01 | |||
| 6ec1ee9ec2 | |||
| 72622926e5 | |||
| f889e77b9c | |||
| beb03ec6c5 | |||
| 4fc9f9ef54 | |||
| d43dc48b34 | |||
| 0b524b0848 | |||
| 13936a9621 | |||
| ed4e542260 | |||
| 3a126e73dd | |||
| 7223886dc9 | |||
| 70c10caa06 | |||
| 077ad693e9 | |||
| 02d4087cb8 | |||
| 7c524d631e | |||
| 6f05ad72b4 | |||
| b90e29d52c | |||
| 58830807d1 | |||
| 328afb7097 | |||
| 0e918707dc | |||
| cb9db101c7 | |||
| 05c083520a | |||
| d7fd10568c | |||
| 84eb699082 | |||
| 00132b7a7a | |||
| 28ba345ecc | |||
| 009273dbdd | |||
| 836e513698 | |||
| a24f830604 | |||
| 44dd941efb | |||
| f2a3eb987e | |||
| 97091acb8c | |||
| 769bb643ce | |||
| c90119e543 | |||
| bfbe52ec39 | |||
| 4cc1bf81ee | |||
| ac27548b25 | |||
| c717d38573 | |||
| 6b763d04a9 | |||
| 7b6e474c9a | |||
| 632d711411 | |||
| c054b5ee64 | |||
| 27b0f86d36 | |||
| 57e54ec070 | |||
| ac42049c08 | |||
| 09ecf225e9 | |||
| edfd965ac8 | |||
| f0aeb7a814 | |||
| 46cc9dd2b5 | |||
| 6219ad7216 | |||
| 0b6122e96a | |||
| c244562cae | |||
| e1e2ab3482 | |||
| 35c52f2f3c | |||
| adb3ef6368 | |||
| ae152cec09 | |||
| 66b15f73f0 | |||
| a7fce6d917 | |||
| 067923d326 | |||
| 368670ac31 | |||
| 1383c7b87a | |||
| 6070b55443 | |||
| 2c9a3115b7 | |||
| 4fb56c7729 | |||
| e179c55490 | |||
| fec76a481d | |||
| 859c441776 | |||
| 0740e63e49 | |||
| 268c6cc160 | |||
| 1d7d01c080 | |||
| c4bc66886d | |||
| ba52fe69d5 | |||
| b1019d2a8e | |||
| 0227b4a940 | |||
| 490ebbdcf7 | |||
| b8009cb0da | |||
| bef0c629ca | |||
| 897d0841be | |||
| 2f869dc665 | |||
| 76be189b08 | |||
| f63ff536ad | |||
| a615499076 | |||
| dbecfcf321 | |||
| acc48a0cc9 | |||
| a1fe4ba9c9 | |||
| 0d46b17553 | |||
| a7ba27b1b4 | |||
| c4e9615691 | |||
| 9d381e7be9 | |||
| d6522e2873 | |||
| 71d597dad0 | |||
| 4bcddf6fc8 | |||
| 506ab34d0e | |||
| cd8980e1f4 | |||
| 123da5a2fa | |||
| 60a1bdcdac | |||
| e6cc6d237f | |||
| 5b78400e21 | |||
| 61cc3ee350 | |||
| dbbd94cb7a | |||
| 5fe0b378d8 | |||
| e848b54730 | |||
| c5b3d86a91 | |||
| 6b70760204 | |||
| 117ed92992 | |||
| b33a385091 | |||
| ed7549bb1a | |||
| 6a72d9aa52 | |||
| b59043bf8f | |||
| edc79acb3b | |||
| 5c82d3488f | |||
| 4acaa65068 | |||
| f289e6cfe4 | |||
| 9726b229cf | |||
| 1849aa7d39 | |||
| 43e0e8fa04 | |||
| f31154cb9d | |||
| 1b35d05d4b | |||
| 352e3ff998 | |||
| 8ad7e5b4f2 | |||
| 064d0a0b76 | |||
| 3b8b0e01bb | |||
| 76da9765b6 | |||
| e691fc0963 | |||
| 15d8b1266c | |||
| 3b469cb422 | |||
| 8ca767f13c | |||
| 74a24f0fe9 | |||
| ab49fafc04 | |||
| a9ab15174c | |||
| f7cd7392fd | |||
| e28d8bde0d | |||
| 44c985facd | |||
| 0201d86015 | |||
| 4cb489457f | |||
| 62b8eb43c1 | |||
| 5bc3d0cc5b | |||
| 183fedfed5 | |||
| 0e9825e252 | |||
| 2397f958f9 | |||
| c490f5ce87 | |||
| 8bb02c27e2 | |||
| 7d4b200e40 | |||
| 69dc010936 | |||
| 7322c314a6 | |||
| 936e813c84 | |||
| 699bc7e86e | |||
| 762ded9b1c | |||
| 7442956361 | |||
| 292140b921 | |||
| c57e9d946f | |||
| 2918b7d2a0 | |||
| 3fbceed8d2 | |||
| 6c2ee16c04 | |||
| 3821ecbf4a | |||
| e3fb4310d6 | |||
| bd404735a7 | |||
| 50e62a4cb4 | |||
| 273617b86d | |||
| 6b13f4cb3a | |||
| 2b644785f0 | |||
| c6bf1a400d | |||
| 92a782b108 | |||
| 6491575fd5 | |||
| ccb6947dc1 | |||
| e4f9dca018 | |||
| b87eb82b4f | |||
| d216e798af | |||
| 6135de2fa3 | |||
| b21d84b027 | |||
| ec07cf5a66 | |||
| 4fef5919a5 | |||
| 7fdbc47822 | |||
| dee3e45b93 | |||
| c82b74b996 | |||
| 5288913bdd | |||
| f773faa258 | |||
| 50b7e52a7f | |||
| 3f56ad5aff | |||
| c4bab2dc85 | |||
| 331db8cc02 | |||
| 83fb311ef7 | |||
| 8fe2c9d98e | |||
| ed6c8d37f4 | |||
| e468192e2f | |||
| 4ce237c880 | |||
| 9dd2c86033 | |||
| e0e5c7faf5 | |||
| 3b7cb7bf44 | |||
| 269e73b601 | |||
| d743f2f34e | |||
| d0efbd3cd1 | |||
| d5481cbe1b | |||
| c079d7ddff | |||
| b19786985d | |||
| 6847e30e1c | |||
| ab30651802 | |||
| a60ae1a505 | |||
| 64fd986376 | |||
| df759114c9 | |||
| 03de9686a7 | |||
| 3d5f291386 | |||
| 99b90edab1 | |||
| 1113f97f33 | |||
| 162ba383b0 | |||
| 6dacc79d39 | |||
| 36bca545ff | |||
| a4f980547f | |||
| eb91f6437e | |||
| 78462aad61 | |||
| 781124b0d1 | |||
| e5fe2bb5e8 | |||
| 0231ba291e | |||
| 0bab55d5d5 | |||
| 9113b50c96 | |||
| 175fce0a55 | |||
| e75c3f70aa | |||
| c0239e09e6 | |||
| cf86d23eff | |||
| 15b70338ba | |||
| fbe04423b6 | |||
| c22545aa40 | |||
| 3b23a846b6 | |||
| 8fa3a1f0d8 | |||
| c41f2bad69 | |||
| 64ce4dbd86 | |||
| b43b130f35 | |||
| 4703148f0c | |||
| 971c24687f | |||
| be54b16960 | |||
| d8e83de792 | |||
| 288be7b7ea | |||
| e891bb43d5 | |||
| 6ce1ee04fc | |||
| 7ed5bf706f | |||
| 708877958a | |||
| 99ae5ab883 | |||
| 1484d67de9 | |||
| 64b2a828c0 | |||
| 4f8b5f687c | |||
| d9184620f9 | |||
| dad3c7a485 | |||
| e296d5bef1 | |||
| c68b4eceed | |||
| 213981d8cb | |||
| 2b56e98892 | |||
| 3a00674cbf | |||
| d939d6fd02 | |||
| 0c2ff34815 | |||
| 08ff056c43 | |||
| 3deea56c07 | |||
| f56b8033f0 | |||
| 4d47f4985d | |||
| 59cefd4f98 | |||
| ddc2cc61a6 | |||
| 7e3070ae4f | |||
| 93e9971c54 | |||
| 092dacfd62 | |||
| e55d4c4ede | |||
| 603c513b35 | |||
| 7de1740490 | |||
| c9885903a1 | |||
| 7334bf6c21 | |||
| c888663f18 | |||
| 62d78aa37e | |||
| 24ed0b9346 | |||
| f6081f2255 | |||
| 8d6a118aee | |||
| 06716d7536 | |||
| c946bb51a6 | |||
| 98dc30b21e | |||
| eae5d3819d | |||
| c7b2808ed7 | |||
| 7c59e32d47 | |||
| ada0d8fec7 | |||
| fcc706343f | |||
| 181075635d | |||
| ebd2cb8d74 | |||
| 483cbc36a9 | |||
| 24d8068982 | |||
| 32da75486b | |||
| 45709d7532 | |||
| b407972e27 | |||
| c2ea5aef77 | |||
| de713fa9b4 | |||
| c304593d8f | |||
| 12e892e174 | |||
| 411981a080 | |||
| 716cc1c4d9 | |||
| a8e071c690 | |||
| 0a4fb0da57 | |||
| edfe91c36e | |||
| 7766ce66dd | |||
| 7f00a36e27 | |||
| e4b46d86ce | |||
| 939cf29157 | |||
| 0f40e8d6a6 | |||
| 0e1e8128bf | |||
| 909d4f1af2 | |||
| 14f0e8e557 | |||
| 34d706a0e1 | |||
| dc8e0019b7 | |||
| 68ab9599ce | |||
| f7e2ac01ea | |||
| 4d8c4337ae | |||
| 3359955622 | |||
| 29b7b30eaa | |||
| 7d2001aa44 | |||
| 16a1f338c4 | |||
| 92e0ad5aba | |||
| 4e6edc3274 | |||
| f55b60b9ee | |||
| 8bd9118294 | |||
| 3e847449ad | |||
| aad3a54e9c | |||
| 40dbda6871 | |||
| 7388c83b60 | |||
| 9727723243 | |||
| 9710b68dbc | |||
| 15ebd67d4e | |||
| e6e5f19257 | |||
| a432b3d466 | |||
| c5407f343f | |||
| 335f57baf8 | |||
| 326944d627 | |||
| d82e5deeb1 | |||
| a59abedfb5 | |||
| 2ef5e0de87 | |||
| 9ce37af99b | |||
| a40955f071 | |||
| 3763f8944d | |||
| f964753090 | |||
| 868de8d1d7 | |||
| 64e0adda81 | |||
| 382e2d1e50 | |||
| a6f2511811 | |||
| 4447f270b2 | |||
| 33d3db5c43 | |||
| 965f172de6 | |||
| f12007e421 | |||
| b860e47cf5 | |||
| 7220d47a1c | |||
| 8415a38b23 | |||
| 96c4d3d988 | |||
| 34858ae1d9 | |||
| 80684f6f86 | |||
| 9e363703d6 | |||
| cc6cd430f7 | |||
| 8289646d4e | |||
| 5076a5daa7 | |||
| 16af9ff7b0 | |||
| b3f9e9451b | |||
| 44e9ddd7fe | |||
| cad88e19de | |||
| c6de625229 | |||
| ff276fc00c | |||
| a64736dc23 | |||
| 460d9afd45 | |||
| 277c77f1c5 | |||
| 659af2cbd0 | |||
| 2d6a53490d | |||
| 35e6baab37 | |||
| 5e1207b8ad | |||
| bcc9e93e6f | |||
| f9cde97b31 | |||
| e02ce4dc79 | |||
| 5c08c8c273 | |||
| 784c0ed89a | |||
| a3a604cefb | |||
| ee0308f79d | |||
| 2d07f945ad | |||
| 6b8d227092 | |||
| 122d5c52ac | |||
| 2647ac3294 | |||
| cf44d98392 | |||
| a3274ac40b | |||
| 826496580b | |||
| de5e5682a1 | |||
| 312fdd7752 | |||
| cdf0f2fec3 | |||
| 8f97f6c57f | |||
| 466a96543a | |||
| c198ff5f1f | |||
| 592d1e3aae | |||
| f836130bff | |||
| c0c7ff5751 | |||
| 48a58646e8 | |||
| 2576a5c6db | |||
| a92b6dc3c1 | |||
| 2a329c6186 | |||
| 45d21502f0 | |||
| 98f5c7864f | |||
| c8bd026ef6 | |||
| 19ef2b0a66 | |||
| d0f591051c | |||
| 4a210c9fc6 | |||
| 0c5a4fe9c9 | |||
| 372a5c1cee | |||
| 96592b544b | |||
| 4cda86b08f | |||
| 1eba8b9d96 | |||
| 314bc6bb4e | |||
| c4fe56dcc0 | |||
| 8de1faea6f | |||
| d0adab2c39 | |||
| a309459b92 | |||
| 9e7bc51b95 | |||
| 69749f3fc3 | |||
| f1433db4f1 | |||
| 077a5b0dc4 | |||
| 2bcda8d00c | |||
| 94247ad6cb | |||
| 49a77ac16f | |||
| 3bf3f9596f | |||
| 3fc63f126d | |||
| 00c7fd2b79 | |||
| fa37b4da77 | |||
| 5289b4b9e0 | |||
| 275179a003 | |||
| 366a3b0285 | |||
| 701bd59b8b | |||
| 303b5e2b92 | |||
| 0198399d84 | |||
| 50fa92c026 | |||
| 0efc4ab632 | |||
| ea9dbea9d5 | |||
| ce86336545 | |||
| d1b6979aa5 | |||
| 101ab4dd8e | |||
| 41089bc7d3 | |||
| 0a8b4d65be | |||
| 968c1b44cb | |||
| 96c2b77f0f | |||
| e211785ada | |||
| 18c8aef9d3 | |||
| 74dbba64bc | |||
| db98a4a48b | |||
| 3ae8c8be1e | |||
| e89520175d | |||
| 74f7906db4 | |||
| 365fb34c6c | |||
| cd110835a0 | |||
| 2dee86319d | |||
| 80f53f7380 | |||
| e79ceb1533 | |||
| 1f5fc95b68 | |||
| c30139a013 | |||
| 87b9ec3843 | |||
| 3963d57c89 | |||
| ed8fad7390 | 
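
The same commit range can be listed from a local clone of the repository. This is a sketch rather than output from this page, and `BASE`/`HEAD` are placeholders for the two refs being compared here:

```bash
# Hypothetical refs: substitute the actual base and head of this comparison
git log --oneline BASE..HEAD | wc -l   # should report 971 commits
git log --oneline BASE..HEAD           # the individual SHAs listed above
```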
Files changed

CircleCI configuration (modified):

```diff
@@ -1,29 +1,100 @@
version: 2
jobs:
    build_py3:
        working_directory: ~/pytorch-pretrained-BERT
    build_py3_torch_and_tf:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install torch
            - run: sudo pip install tensorflow==2.0.0-rc0
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest ftfy spacy
            - run: sudo python -m spacy download en
            - run: python -m pytest -sv tests/ --runslow
    build_py2:
        working_directory: ~/pytorch-pretrained-BERT
            - run: sudo pip install pytest codecov pytest-cov
            - run: sudo pip install tensorboardX scikit-learn
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
    build_py3_torch:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install torch
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: sudo pip install tensorboardX scikit-learn
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: python -m pytest -sv ./examples/
            - run: codecov
    build_py3_tf:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
        resource_class: xlarge
        parallelism: 1
        steps:
            - checkout
            - run: sudo pip install tensorflow==2.0.0-rc0
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: sudo pip install tensorboardX scikit-learn
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
    build_py2_torch:
        working_directory: ~/transformers
        resource_class: large
        parallelism: 1
        docker:
            - image: circleci/python:2.7
        steps:
            - checkout
            - run: sudo pip install torch
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest spacy
            - run: sudo pip install ftfy==4.4.3
            - run: sudo python -m spacy download en
            - run: python -m pytest -sv tests/ --runslow
            - run: sudo pip install pytest codecov pytest-cov
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
    build_py2_tf:
        working_directory: ~/transformers
        resource_class: large
        parallelism: 1
        docker:
            - image: circleci/python:2.7
        steps:
            - checkout
            - run: sudo pip install tensorflow==2.0.0-rc0
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest codecov pytest-cov
            - run: python -m pytest -sv ./transformers/tests/ --cov
            - run: codecov
    deploy_doc:
        working_directory: ~/transformers
        docker:
            - image: circleci/python:3.5
        steps:
            - add_ssh_keys:
                  fingerprints:
                      - "5b:7a:95:18:07:8c:aa:76:4c:60:35:88:ad:60:56:71"
            - checkout
            - run: sudo pip install --progress-bar off -r docs/requirements.txt
            - run: sudo pip install --progress-bar off -r requirements.txt
            - run: cd docs/source && ln -s ../../examples/README.md examples.md && cd -
            - run: cd docs && make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir
workflow_filters: &workflow_filters
    filters:
        branches:
            only:
                - master
workflows:
  version: 2
  build_and_test:
    jobs:
      - build_py3
      - build_py2
    version: 2
    build_and_test:
        jobs:
            - build_py3_torch_and_tf
            - build_py3_torch
            - build_py3_tf
            - build_py2_torch
            - build_py2_tf
            - deploy_doc: *workflow_filters
```
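
The CI jobs above can be approximated outside CircleCI. The sketch below mirrors the `build_py3_torch` job on a local Python 3 checkout of the repository; it is illustrative only and not part of the diff:

```bash
# Rough local equivalent of the build_py3_torch job (run from the repository root)
pip install torch
pip install --progress-bar off .
pip install pytest codecov pytest-cov tensorboardX scikit-learn
python -m pytest -sv ./transformers/tests/ --cov
python -m pytest -sv ./examples/
```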
							
								
								
									
**.coveragerc** (new file, 12 lines)

```ini
[run]
source=transformers
omit =
    # skip convertion scripts from testing for now
    */convert_*
    */__main__.py
[report]
exclude_lines =
    pragma: no cover
    raise
    except
    register_parameter
```
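
This coverage configuration is what the `--cov` runs in the CI jobs above pick up. A minimal local invocation, mirroring those jobs rather than adding anything new, would be:

```bash
pip install pytest codecov pytest-cov
python -m pytest -sv ./transformers/tests/ --cov   # coverage.py reads .coveragerc from the repository root
codecov                                            # upload step; normally only meaningful in CI or with a token
```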
							
								
								
									
**.github/ISSUE_TEMPLATE/bug-report.md** (new file, 48 lines)

```md
---
name: "\U0001F41B Bug Report"
about: Submit a bug report to help us improve PyTorch Transformers
---

## 🐛 Bug

<!-- Important information -->

Model I am using (Bert, XLNet....):

Language I am using the model on (English, Chinese....):

The problem arise when using:
* [ ] the official example scripts: (give details)
* [ ] my own modified scripts: (give details)

The tasks I am working on is:
* [ ] an official GLUE/SQUaD task: (give the name)
* [ ] my own task or dataset: (give details)

## To Reproduce

Steps to reproduce the behavior:

1.
2.
3.

<!-- If you have a code sample, error messages, stack traces, please provide it here as well. -->

## Expected behavior

<!-- A clear and concise description of what you expected to happen. -->

## Environment

* OS:
* Python version:
* PyTorch version:
* PyTorch Transformers version (or branch):
* Using GPU ?
* Distributed of parallel setup ?
* Any other relevant information:

## Additional context

<!-- Add any other context about the problem here. -->
```
							
								
								
									
**.github/ISSUE_TEMPLATE/feature-request.md** (new file, 16 lines)

```md
---
name: "\U0001F680 Feature Request"
about: Submit a proposal/request for a new PyTorch Transformers feature
---

## 🚀 Feature

<!-- A clear and concise description of the feature proposal. Please provide a link to the paper and code in case they exist. -->

## Motivation

<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too. -->

## Additional context

<!-- Add any other context or screenshots about the feature request here. -->
```
							
								
								
									
**.github/ISSUE_TEMPLATE/migration.md** (new file, 43 lines)

```md
---
name: "\U0001F4DA Migration from PyTorch-pretrained-Bert"
about: Report a problem when migrating from PyTorch-pretrained-Bert to Transformers
---

## 📚 Migration

<!-- Important information -->

Model I am using (Bert, XLNet....):

Language I am using the model on (English, Chinese....):

The problem arise when using:
* [ ] the official example scripts: (give details)
* [ ] my own modified scripts: (give details)

The tasks I am working on is:
* [ ] an official GLUE/SQUaD task: (give the name)
* [ ] my own task or dataset: (give details)

Details of the issue:

<!-- A clear and concise description of the migration issue. If you have code snippets, please provide it here as well. -->

## Environment

* OS:
* Python version:
* PyTorch version:
* PyTorch Transformers version (or branch):
* Using GPU ?
* Distributed of parallel setup ?
* Any other relevant information:

## Checklist

- [ ] I have read the migration guide in the readme.
- [ ] I checked if a related official extension example runs on my machine.

## Additional context

<!-- Add any other context about the problem here. -->
```
							
								
								
									
**.github/ISSUE_TEMPLATE/question-help.md** (new file, 8 lines)

```md
---
name: "❓Questions & Help"
about: Start a general discussion related to PyTorch Transformers
---

## ❓ Questions & Help

<!-- A clear and concise description of the question. -->
```
							
								
								
									
**.gitignore** (modified)

```diff
@@ -122,4 +122,13 @@ dmypy.json
tensorflow_code

# Models
models
models
proc_data

# examples
runs
examples/runs

# data
/data
serialization_dir
```
Docker image definition (modified):

```diff
@@ -2,6 +2,6 @@ FROM pytorch/pytorch:latest
 
 RUN git clone https://github.com/NVIDIA/apex.git && cd apex && python setup.py install --cuda_ext --cpp_ext
 
-RUN pip install pytorch-pretrained-bert
+RUN pip install transformers
 
 WORKDIR /workspace
```
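
As a usage sketch (not part of the diff), building and entering this image locally could look like the following; the Dockerfile location and the image tag are assumptions:

```bash
# Hypothetical build; point -f at wherever this Dockerfile lives in the checkout
docker build -t transformers-dev -f Dockerfile .
docker run -it transformers-dev bash   # starts in /workspace, per the WORKDIR above
```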
							
								
								
									
**docs/Makefile** (new file, 19 lines)

```makefile
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SOURCEDIR     = source
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
```
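
The catch-all `%` target forwards any goal to `sphinx-build` in make mode, so with the variables defined above, `make html` run from `docs/` amounts to roughly:

```bash
sphinx-build -M html source _build
```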
							
								
								
									
**docs/README.md** (new file, 67 lines)

````md
# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
you can install them using:

```bash
pip install -r requirements.txt
```

## Packages installed

Here's an overview of all the packages installed. If you ran the previous command installing all packages from
`requirements.txt`, you do not need to run the following commands.

Building it requires the package `sphinx` that you can
install using:

```bash
pip install -U sphinx
```

You would also need the custom installed [theme](https://github.com/readthedocs/sphinx_rtd_theme) by
[Read The Docs](https://readthedocs.org/). You can install it using the following command:

```bash
pip install sphinx_rtd_theme
```

The third necessary package is the `recommonmark` package to accept Markdown as well as Restructured text:

```bash
pip install recommonmark
```

## Building the documentation

Make sure that there is a symlink from the `example` file (in /examples) inside the source folder. Run the followig
command to generate it:

```bash
ln -s ../../examples/README.md source/examples.md
```

Once you have setup `sphinx`, you can build the documentation by running the following command in the `/docs` folder:

```bash
make html
```

---
**NOTE**

If you are adding/removing elements from the toc-tree or from any strutural item, it is recommended to clean the build
directory before rebuilding. Run the following command to clean and build:

```bash
make clean && make html
```

---

It should build the static app that will be available under `/docs/_build/html`

## Adding a new element to the tree (toc-tree)

Accepted files are reStructuredText (.rst) and Markdown (.md). Create a file with its extension and put it
in the source directory. You can then link it to the toc-tree by putting the filename without the extension.
````
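
Putting this README's steps together with the `deploy_doc` CI job above, a complete local documentation build, run from the `docs/` folder, would look roughly like:

```bash
# Sketch of a full local docs build, assuming the current directory is docs/
pip install -r requirements.txt
ln -s ../../examples/README.md source/examples.md   # only needed once
make clean && make html                             # output ends up in _build/html
```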
							
								
								
									
**docs/requirements.txt** (new file, 29 lines)

```
alabaster==0.7.12
Babel==2.7.0
certifi==2019.6.16
chardet==3.0.4
commonmark==0.9.0
docutils==0.14
future==0.17.1
idna==2.8
imagesize==1.1.0
Jinja2==2.10.1
MarkupSafe==1.1.1
packaging==19.0
Pygments==2.4.2
pyparsing==2.4.0
pytz==2019.1
recommonmark==0.5.0
requests==2.22.0
six==1.12.0
snowballstemmer==1.9.0
Sphinx==2.1.2
sphinx-rtd-theme==0.4.3
sphinxcontrib-applehelp==1.0.1
sphinxcontrib-devhelp==1.0.1
sphinxcontrib-htmlhelp==1.0.2
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.2
sphinxcontrib-serializinghtml==1.1.3
urllib3==1.25.3
sphinx-markdown-tables==0.0.9
```
							
								
								
									
										
**New binary files**

* docs/source/_static/css/Calibre-Light.ttf
* docs/source/_static/css/Calibre-Medium.otf
* docs/source/_static/css/Calibre-Regular.otf
* docs/source/_static/css/Calibre-Thin.otf
							
								
								
									
**docs/source/_static/css/code-snippets.css** (new file, 12 lines)

```css

.highlight .c1, .highlight .sd{
    color: #999
}

.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc {
    color: #FB8D68;
}

.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow {
    color: #6670FF;
}
```
							
								
								
									
**docs/source/_static/css/huggingface.css** (new file, 199 lines)

```css
huggingface.css

/* The literal code blocks */
.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
    color: #6670FF;
}

/* To keep the logo centered */
.wy-side-scroll {
    width: auto;
    font-size: 20px;
}

/* The div that holds the Hugging Face logo */
.HuggingFaceDiv {
    width: 100%
}

/* The research field on top of the toc tree */
.wy-side-nav-search{
    background-color: #6670FF;
}

/* The toc tree */
.wy-nav-side{
    background-color: #6670FF;
}

/* The selected items in the toc tree */
.wy-menu-vertical li.current{
    background-color: #A6B0FF;
}

/* When a list item that does belong to the selected block from the toc tree is hovered */
.wy-menu-vertical li.current a:hover{
    background-color: #B6C0FF;
}

/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */
.wy-menu-vertical li a:hover{
    background-color: #A7AFFB;
}

/* The text items on the toc tree */
.wy-menu-vertical a {
    color: #FFFFDD;
    font-family: Calibre-Light;
}
.wy-menu-vertical header, .wy-menu-vertical p.caption{
    color: white;
    font-family: Calibre-Light;
}

/* The color inside the selected toc tree block */
.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {
    color: black;
}

/* Inside the depth-2 selected toc tree block */
.wy-menu-vertical li.toctree-l2.current>a {
    background-color: #B6C0FF
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
    background-color: #C6D0FF
}

/* Inside the depth-3 selected toc tree block */
.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{
    background-color: #D6E0FF
}

/* Inside code snippets */
.rst-content dl:not(.docutils) dt{
    font-size: 15px;
}

/* Links */
a {
    color: #6670FF;
}

/* Content bars */
.rst-content dl:not(.docutils) dt {
    background-color: rgba(251, 141, 104, 0.1);
    border-right: solid 2px #FB8D68;
    border-left: solid 2px #FB8D68;
    color: #FB8D68;
    font-family: Calibre-Light;
    border-top: none;
    font-style: normal !important;
}

/* Expand button */
.wy-menu-vertical li.toctree-l2 span.toctree-expand,
.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,
.wy-menu-vertical li.toctree-l3 span.toctree-expand{
    color: black;
}

/* Max window size */
.wy-nav-content{
    max-width: 1200px;
}

/* Mobile header */
.wy-nav-top{
    background-color: #6670FF;
}


/* Source spans */
.rst-content .viewcode-link, .rst-content .viewcode-back{
    color: #6670FF;
    font-size: 110%;
    letter-spacing: 2px;
    text-transform: uppercase;
}

/* It would be better for table to be visible without horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th{
    white-space: normal;
}

.footer {
    margin-top: 20px;
}

.footer__Social {
    display: flex;
    flex-direction: row;
}

.footer__CustomImage {
    margin: 2px 5px 0 0;
}

/* class and method names in doc */
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
    font-family: Calibre;
    font-size: 20px !important;
}

/* class name in doc*/
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
    margin-right: 10px;
    font-family: Calibre-Medium;
}

/* Method and class parameters */
.sig-param{
    line-height: 23px;
}

/* Class introduction "class" string at beginning */
.rst-content dl:not(.docutils) .property{
    font-size: 18px;
    color: black;
}


/* FONTS */
body{
    font-family: Calibre;
    font-size: 16px;
}

h1 {
    font-family: Calibre-Thin;
    font-size: 70px;
}

h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
    font-family: Calibre-Medium;
}

@font-face {
    font-family: Calibre-Medium;
    src: url(./Calibre-Medium.otf);
    font-weight:400;
}

@font-face {
    font-family: Calibre;
    src: url(./Calibre-Regular.otf);
    font-weight:400;
}

@font-face {
    font-family: Calibre-Light;
    src: url(./Calibre-Light.ttf);
    font-weight:400;
}

@font-face {
    font-family: Calibre-Thin;
    src: url(./Calibre-Thin.otf);
    font-weight:400;
}
```
							
								
								
									
**docs/source/_static/js/custom.js** (new file, 54 lines)

```js
function addIcon() {
    const huggingFaceLogo = "http://lysand.re/huggingface_logo.svg";
    const image = document.createElement("img");
    image.setAttribute("src", huggingFaceLogo);

    const div = document.createElement("div");
    div.appendChild(image);
    div.style.textAlign = 'center';
    div.style.paddingTop = '30px';
    div.style.backgroundColor = '#6670FF';

    const scrollDiv = document.getElementsByClassName("wy-side-scroll")[0];
    scrollDiv.prepend(div);
}

function addCustomFooter() {
    const customFooter = document.createElement("div");
    const questionOrIssue = document.createElement("div");
    questionOrIssue.innerHTML = "Stuck? Read our <a href='https://medium.com/huggingface'>Blog posts</a> or <a href='https://github.com/huggingface/transformers'>Create an issue</a>";
    customFooter.appendChild(questionOrIssue);
    customFooter.classList.add("footer");

    const social = document.createElement("div");
    social.classList.add("footer__Social");

    const imageDetails = [
        { link: "https://huggingface.co", imageLink: "http://lysand.re/icons/website.svg" },
        { link: "https://twitter.com/huggingface", imageLink: "http://lysand.re/icons/twitter.svg" },
        { link: "https://github.com/huggingface", imageLink: "http://lysand.re/icons/github.svg" },
        { link: "https://www.linkedin.com/company/huggingface/", imageLink: "http://lysand.re/icons/linkedin.svg" }
    ];

    imageDetails.forEach(imageLinks => {
        const link = document.createElement("a");
        const image = document.createElement("img");
        image.src = imageLinks.imageLink;
        link.href = imageLinks.link;
        image.style.width = "30px";
        image.classList.add("footer__CustomImage");
        link.appendChild(image);
        social.appendChild(link);
    });

    customFooter.appendChild(social);
    document.getElementsByTagName("footer")[0].appendChild(customFooter);
}

function onLoad() {
    addIcon();
    addCustomFooter();
}

window.addEventListener("load", onLoad);
```
							
								
								
									
**docs/source/_static/js/huggingface_logo.svg** (new file, 47 lines): Sketch-exported SVG image of the Hugging Face logo (title "icon"); the vector path data is not reproduced here, and the extract ends partway through this file.
 | 
			
		||||
                </g>
 | 
			
		||||
                <g id="Group-4" transform="translate(70.500000, 66.500000) scale(-1, 1) translate(-70.500000, -66.500000) translate(50.000000, 48.000000)" fill-rule="nonzero">
 | 
			
		||||
                    <path d="M14.0619453,0 L14.0619453,0 C12.4429453,0 10.9959453,0.665 9.98694534,1.871 C9.36294534,2.618 8.71094534,3.822 8.65794534,5.625 C7.97894534,5.43 7.32594534,5.321 6.71594534,5.321 C5.16594534,5.321 3.76594534,5.915 2.77594534,6.994 C1.50394534,8.379 0.938945345,10.081 1.18494534,11.784 C1.30194534,12.595 1.57294534,13.322 1.97794534,13.995 C1.12394534,14.686 0.494945345,15.648 0.190945345,16.805 C-0.0470546551,17.712 -0.291054655,19.601 0.982945345,21.547 C0.901945345,21.674 0.825945345,21.806 0.754945345,21.941 C-0.0110546551,23.395 -0.0600546551,25.038 0.615945345,26.568 C1.64094534,28.887 4.18794534,30.714 9.13394534,32.675 C12.2109453,33.895 15.0259453,34.675 15.0509453,34.682 C19.1189453,35.737 22.7979453,36.273 25.9829453,36.273 C31.8369453,36.273 36.0279453,34.48 38.4399453,30.944 C42.3219453,25.25 41.7669453,20.042 36.7439453,15.022 C33.9639453,12.244 32.1159453,8.148 31.7309453,7.249 C30.9549453,4.587 28.9029453,1.628 25.4919453,1.628 L25.4909453,1.628 C25.2039453,1.628 24.9139453,1.651 24.6279453,1.696 C23.1339453,1.931 21.8279453,2.791 20.8949453,4.085 C19.8879453,2.833 18.9099453,1.837 18.0249453,1.275 C16.6909453,0.429 15.3579453,0 14.0619453,0 M14.0619453,4 C14.5719453,4 15.1949453,4.217 15.8819453,4.653 C18.0149453,6.006 22.1309453,13.081 23.6379453,15.833 C24.1429453,16.755 25.0059453,17.145 25.7829453,17.145 C27.3249453,17.145 28.5289453,15.612 25.9239453,13.664 C22.0069453,10.733 23.3809453,5.942 25.2509453,5.647 C25.3329453,5.634 25.4139453,5.628 25.4919453,5.628 C27.1919453,5.628 27.9419453,8.558 27.9419453,8.558 C27.9419453,8.558 30.1399453,14.078 33.9159453,17.851 C37.6919453,21.625 37.8869453,24.654 35.1349453,28.69 C33.2579453,31.442 29.6649453,32.273 25.9829453,32.273 C22.1639453,32.273 18.2489453,31.379 16.0549453,30.81 C15.9469453,30.782 2.60394534,27.013 4.29394534,23.805 C4.57794534,23.266 5.04594534,23.05 5.63494534,23.05 C8.01494534,23.05 12.3439453,26.592 14.2049453,26.592 C14.6209453,26.592 14.9139453,26.415 15.0339453,25.983 C15.8269453,23.138 2.97694534,21.942 4.05994534,17.821 C4.25094534,17.092 4.76894534,16.796 5.49694534,16.797 C8.64194534,16.797 15.6979453,22.328 17.1769453,22.328 C17.2899453,22.328 17.3709453,22.295 17.4149453,22.225 C18.1559453,21.029 17.7499453,20.194 12.5269453,17.033 C7.30394534,13.871 3.63794534,11.969 5.72294534,9.699 C5.96294534,9.437 6.30294534,9.321 6.71594534,9.321 C9.88694534,9.322 17.3789453,16.14 17.3789453,16.14 C17.3789453,16.14 19.4009453,18.243 20.6239453,18.243 C20.9049453,18.243 21.1439453,18.132 21.3059453,17.858 C22.1729453,16.396 13.2529453,9.636 12.7499453,6.847 C12.4089453,4.957 12.9889453,4 14.0619453,4" id="Fill-1" fill="#FFAC03"></path>
 | 
			
		||||
                    <path d="M35.1348,28.6899 C37.8868,24.6539 37.6918,21.6249 33.9158,17.8509 C30.1398,14.0779 27.9418,8.5579 27.9418,8.5579 C27.9418,8.5579 27.1208,5.3519 25.2508,5.6469 C23.3808,5.9419 22.0078,10.7329 25.9248,13.6639 C29.8418,16.5939 25.1448,18.5849 23.6378,15.8329 C22.1308,13.0809 18.0158,6.0059 15.8818,4.6529 C13.7488,3.2999 12.2468,4.0579 12.7498,6.8469 C13.2528,9.6359 22.1738,16.3959 21.3058,17.8589 C20.4378,19.3209 17.3788,16.1399 17.3788,16.1399 C17.3788,16.1399 7.8068,7.4289 5.7228,9.6989 C3.6388,11.9689 7.3038,13.8709 12.5268,17.0329 C17.7508,20.1939 18.1558,21.0289 17.4148,22.2249 C16.6728,23.4209 5.1428,13.6999 4.0598,17.8209 C2.9778,21.9419 15.8268,23.1379 15.0338,25.9829 C14.2408,28.8289 5.9828,20.5979 4.2938,23.8049 C2.6038,27.0129 15.9468,30.7819 16.0548,30.8099 C20.3648,31.9279 31.3108,34.2969 35.1348,28.6899" id="Fill-4" fill="#FFD21E"></path>
 | 
			
		||||
                </g>
 | 
			
		||||
            </g>
 | 
			
		||||
        </g>
 | 
			
		||||
    </g>
 | 
			
		||||
</svg>
 | 
			
		||||
| 
		 After Width: | Height: | Size: 14 KiB  | 
							
								
								
									
18  docs/source/bertology.rst  Normal file
@@ -0,0 +1,18 @@

BERTology
---------

There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT (which some call "BERTology"). Some good examples of this field are:

* BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: https://arxiv.org/abs/1905.05950
* Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
* What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341

In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (https://arxiv.org/abs/1905.10650):

* accessing all the hidden states of BERT/GPT/GPT-2,
* accessing all the attention weights for each head of BERT/GPT/GPT-2,
* retrieving the heads' output values and gradients in order to compute head importance scores and prune heads, as explained in https://arxiv.org/abs/1905.10650.

To help you understand and use these features, we have added a specific example script, `run_bertology.py <https://github.com/huggingface/transformers/blob/master/examples/run_bertology.py>`_, which extracts information from and prunes a model pre-trained on GLUE.
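For readers who want to try these hooks outside the example script, here is a minimal sketch of accessing hidden states and attention weights and pruning heads. The ``output_hidden_states``/``output_attentions`` flags and ``prune_heads`` are the features referred to above; the model name, sample sentence and pruning pattern are arbitrary choices for illustration only.

.. code-block:: python

   import torch
   from transformers import BertModel, BertTokenizer

   # Ask the model to return hidden states and attention weights in its output tuple
   model = BertModel.from_pretrained('bert-base-uncased',
                                     output_hidden_states=True,
                                     output_attentions=True)
   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

   input_ids = torch.tensor([tokenizer.encode("Let's see what BERT attends to")])
   last_hidden_state, pooler_output, hidden_states, attentions = model(input_ids)

   # hidden_states: one tensor per layer (plus the embeddings); attentions: one tensor per layer
   print(len(hidden_states), len(attentions))

   # Prune heads 0 and 2 of layer 0 (an arbitrary pattern, just for illustration)
   model.prune_heads({0: [0, 2]})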
							
								
								
									
188  docs/source/conf.py  Normal file
@@ -0,0 +1,188 @@

# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))


# -- Project information -----------------------------------------------------

project = u'transformers'
copyright = u'2019, huggingface'
author = u'huggingface'

# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'1.2.0'


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'recommonmark',
    'sphinx.ext.viewcode',
    'sphinx_markdown_tables'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'analytics_id': 'UA-83738774-2'
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'transformersdoc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'transformers.tex', u'transformers Documentation',
     u'huggingface', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'transformers', u'transformers Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'transformers', u'transformers Documentation',
     author, 'transformers', 'One line description of project.',
     'Miscellaneous'),
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

def setup(app):
    app.add_stylesheet('css/huggingface.css')
    app.add_stylesheet('css/code-snippets.css')
    app.add_js_file('js/custom.js')

# -- Extension configuration -------------------------------------------------
							
								
								
									
101  docs/source/converting_tensorflow_models.rst  Normal file
@@ -0,0 +1,101 @@

Converting Tensorflow Checkpoints
================================================

A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints into models that can be loaded using the ``from_pretrained`` methods of the library.

BERT
^^^^

You can convert any TensorFlow checkpoint for BERT (in particular `the pre-trained models released by Google <https://github.com/google-research/bert#pre-trained-models>`_\ ) into a PyTorch save file by using the `convert_tf_checkpoint_to_pytorch.py <https://github.com/huggingface/transformers/blob/master/transformers/convert_tf_checkpoint_to_pytorch.py>`_ script.

This CLI takes as input a TensorFlow checkpoint (three files starting with ``bert_model.ckpt``\ ) and the associated configuration file (\ ``bert_config.json``\ ), creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint into the PyTorch model and saves the resulting model in a standard PyTorch save file that can be imported using ``torch.load()`` (see examples in `run_bert_extract_features.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_extract_features.py>`_\ , `run_bert_classifier.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_classifier.py>`_ and `run_bert_squad.py <https://github.com/huggingface/pytorch-pretrained-BERT/tree/master/examples/run_bert_squad.py>`_\ ).

You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow checkpoint (the three files starting with ``bert_model.ckpt``\ ) but be sure to keep the configuration file (\ ``bert_config.json``\ ) and the vocabulary file (\ ``vocab.txt``\ ) as these are needed for the PyTorch model too.

To run this specific conversion script you will need to have TensorFlow and PyTorch installed (\ ``pip install tensorflow``\ ). The rest of the repository only requires PyTorch.

Here is an example of the conversion process for a pre-trained ``BERT-Base Uncased`` model:

.. code-block:: shell

   export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

   transformers bert \
     $BERT_BASE_DIR/bert_model.ckpt \
     $BERT_BASE_DIR/bert_config.json \
     $BERT_BASE_DIR/pytorch_model.bin

You can download Google's pre-trained models for the conversion `here <https://github.com/google-research/bert#pre-trained-models>`__.

OpenAI GPT
^^^^^^^^^^

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint is saved in the same format as the OpenAI pretrained model (see `here <https://github.com/openai/finetune-transformer-lm>`__\ ):

.. code-block:: shell

   export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

   transformers gpt \
     $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
     $PYTORCH_DUMP_OUTPUT \
     [OPENAI_GPT_CONFIG]

OpenAI GPT-2
^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see `here <https://github.com/openai/gpt-2>`__\ ):

.. code-block:: shell

   export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights

   transformers gpt2 \
     $OPENAI_GPT2_CHECKPOINT_PATH \
     $PYTORCH_DUMP_OUTPUT \
     [OPENAI_GPT2_CONFIG]

Transformer-XL
^^^^^^^^^^^^^^

Here is an example of the conversion process for a pre-trained Transformer-XL model (see `here <https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models>`__\ ):

.. code-block:: shell

   export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint

   transformers transfo_xl \
     $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \
     $PYTORCH_DUMP_OUTPUT \
     [TRANSFO_XL_CONFIG]


XLNet
^^^^^

Here is an example of the conversion process for a pre-trained XLNet model, fine-tuned on STS-B using the TensorFlow script:

.. code-block:: shell

   export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
   export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config

   transformers xlnet \
     $TRANSFO_XL_CHECKPOINT_PATH \
     $TRANSFO_XL_CONFIG_PATH \
     $PYTORCH_DUMP_OUTPUT \
     STS-B


XLM
^^^

Here is an example of the conversion process for a pre-trained XLM model:

.. code-block:: shell

   export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

   transformers xlm \
     $XLM_CHECKPOINT_PATH \
     $PYTORCH_DUMP_OUTPUT
							
								
								
									
										
BIN  docs/source/imgs/transformers_logo_name.png  Normal file
 After Width: | Height: | Size: 8.7 KiB
 Before Width: | Height: | Size: 9.7 KiB   After Width: | Height: | Size: 9.7 KiB
 Before Width: | Height: | Size: 22 KiB    After Width: | Height: | Size: 22 KiB
 Before Width: | Height: | Size: 17 KiB    After Width: | Height: | Size: 17 KiB
 Before Width: | Height: | Size: 22 KiB    After Width: | Height: | Size: 22 KiB
 Before Width: | Height: | Size: 16 KiB    After Width: | Height: | Size: 16 KiB
							
								
								
									
84  docs/source/index.rst  Normal file
@@ -0,0 +1,84 @@

Transformers
================================================================================================================================================

🤗 Transformers (formerly known as `pytorch-transformers` and `pytorch-pretrained-bert`) provides general-purpose architectures
(BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural Language Understanding (NLU) and Natural Language Generation
(NLG) with 32+ pretrained models in 100+ languages and deep interoperability between TensorFlow 2.0 and PyTorch.

Features
---------------------------------------------------

- As easy to use as pytorch-transformers
- As powerful and concise as Keras
- High performance on NLU and NLG tasks
- Low barrier to entry for educators and practitioners

State-of-the-art NLP for everyone:

- Deep learning researchers
- Hands-on practitioners
- AI/ML/NLP teachers and educators

Lower compute costs, smaller carbon footprint:

- Researchers can share trained models instead of always retraining
- Practitioners can reduce compute time and production costs
- 8 architectures with over 30 pretrained models, some in more than 100 languages

Choose the right framework for every part of a model's lifetime:

- Train state-of-the-art models in 3 lines of code
- Deep interoperability between TensorFlow 2.0 and PyTorch models
- Move a single model between TF2.0/PyTorch frameworks at will
- Seamlessly pick the right framework for training, evaluation and production

Contents
---------------------------------

The library currently contains PyTorch and TensorFlow implementations, pre-trained model weights, usage scripts and conversion utilities for the following models:

1. `BERT <https://github.com/google-research/bert>`_ (from Google) released with the paper `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
2. `GPT <https://github.com/openai/finetune-transformer-lm>`_ (from OpenAI) released with the paper `Improving Language Understanding by Generative Pre-Training <https://blog.openai.com/language-unsupervised>`_ by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
3. `GPT-2 <https://blog.openai.com/better-language-models>`_ (from OpenAI) released with the paper `Language Models are Unsupervised Multitask Learners <https://blog.openai.com/better-language-models>`_ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
4. `Transformer-XL <https://github.com/kimiyoung/transformer-xl>`_ (from Google/CMU) released with the paper `Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context <https://arxiv.org/abs/1901.02860>`_ by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le and Ruslan Salakhutdinov.
5. `XLNet <https://github.com/zihangdai/xlnet>`_ (from Google/CMU) released with the paper `XLNet: Generalized Autoregressive Pretraining for Language Understanding <https://arxiv.org/abs/1906.08237>`_ by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov and Quoc V. Le.
6. `XLM <https://github.com/facebookresearch/XLM>`_ (from Facebook) released together with the paper `Cross-lingual Language Model Pretraining <https://arxiv.org/abs/1901.07291>`_ by Guillaume Lample and Alexis Conneau.
7. `RoBERTa <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`_ (from Facebook), released together with the paper `RoBERTa: A Robustly Optimized BERT Pretraining Approach <https://arxiv.org/abs/1907.11692>`_ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer and Veselin Stoyanov.
8. `DistilBERT <https://huggingface.co/transformers/model_doc/distilbert.html>`_ (from HuggingFace) released together with the blog post `Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT <https://medium.com/huggingface/distilbert-8cf3380435b5>`_ by Victor Sanh, Lysandre Debut and Thomas Wolf.


.. toctree::
    :maxdepth: 2
    :caption: Notes

    installation
    quickstart
    pretrained_models
    examples
    notebooks
    serialization
    converting_tensorflow_models
    migration
    bertology
    torchscript

.. toctree::
    :maxdepth: 2
    :caption: Main classes

    main_classes/configuration
    main_classes/model
    main_classes/tokenizer
    main_classes/optimizer_schedules
    main_classes/processors

.. toctree::
    :maxdepth: 2
    :caption: Package Reference

    model_doc/auto
    model_doc/bert
    model_doc/gpt
    model_doc/transformerxl
    model_doc/gpt2
    model_doc/xlm
    model_doc/xlnet
    model_doc/roberta
    model_doc/distilbert
							
								
								
									
71  docs/source/installation.rst  Normal file
@@ -0,0 +1,71 @@

Installation
================================================

Transformers is tested on Python 2.7 and 3.5+ (examples are tested only on Python 3.5+) and PyTorch 1.1.0.

With pip
^^^^^^^^

Transformers can be installed using pip as follows:

.. code-block:: bash

   pip install transformers

From source
^^^^^^^^^^^

To install from source, clone the repository and install with:

.. code-block:: bash

    git clone https://github.com/huggingface/transformers.git
    cd transformers
    pip install [--editable] .
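As a quick sanity check after either install method, you can print the installed version from Python. This is only a suggested verification step, assuming the package exposes a ``__version__`` attribute as its releases normally do; the exact version string depends on the release you installed:

.. code-block:: python

   import transformers
   print(transformers.__version__)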

Tests
^^^^^

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the `tests folder <https://github.com/huggingface/transformers/tree/master/transformers/tests>`_ and examples tests in the `examples folder <https://github.com/huggingface/transformers/tree/master/examples>`_.

Tests can be run using ``pytest`` (install pytest if needed with ``pip install pytest``).

Run all the tests from the root of the cloned repository with the commands:

.. code-block:: bash

    python -m pytest -sv ./transformers/tests/
    python -m pytest -sv ./examples/


OpenAI GPT original tokenization workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you want to reproduce the original tokenization process of the ``OpenAI GPT`` paper, you will need to install ``ftfy`` (use version 4.4.3 if you are using Python 2) and ``SpaCy``:

.. code-block:: bash

   pip install spacy ftfy==4.4.3
   python -m spacy download en

If you don't install ``ftfy`` and ``SpaCy``\ , the ``OpenAI GPT`` tokenizer will default to tokenizing using BERT's ``BasicTokenizer`` followed by Byte-Pair Encoding (which should be fine for most usage, don't worry).


Note on model downloads (Continuous Integration or large-scale deployments)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you expect to be downloading large volumes of models (more than 1,000) from our hosted bucket (for instance through your CI setup, or a large-scale production deployment), please cache the model files on your end. It will be way faster, and cheaper. Feel free to contact us privately if you need any help.


Do you want to run a Transformer model on a mobile device?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You should check out our `swift-coreml-transformers <https://github.com/huggingface/swift-coreml-transformers>`_ repo.

It contains an example of a conversion script from a PyTorch-trained Transformer model (here, ``GPT-2``) to a CoreML model that runs on iOS devices.

It also contains an implementation of BERT for Question answering.

At some point in the future, you'll be able to seamlessly move from pre-training or fine-tuning models in PyTorch to productizing them in CoreML,
or prototype a model or an app in CoreML and then research its hyperparameters or architecture from PyTorch. Super exciting!
							
								
								
									
10  docs/source/main_classes/configuration.rst  Normal file
@@ -0,0 +1,10 @@

Configuration
----------------------------------------------------

The base class ``PretrainedConfig`` implements the common methods for loading/saving a configuration either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace's AWS S3 repository).

``PretrainedConfig``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.PretrainedConfig
    :members:
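As a small illustration of the load/save round trip described above, the following sketch uses the ``BertConfig`` subclass; the directory path is only a placeholder:

.. code-block:: python

   from transformers import BertConfig

   # Load a configuration from a pretrained model identifier
   config = BertConfig.from_pretrained('bert-base-uncased')
   print(config.hidden_size, config.num_attention_heads)

   # Tweak it, save it to a local directory, and reload it from there
   config.output_attentions = True
   config.save_pretrained('./my_bert_config/')       # placeholder directory
   config = BertConfig.from_pretrained('./my_bert_config/')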
							
								
								
									
21  docs/source/main_classes/model.rst  Normal file
@@ -0,0 +1,21 @@

Models
----------------------------------------------------

The base class ``PreTrainedModel`` implements the common methods for loading/saving a model either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace's AWS S3 repository).

``PreTrainedModel`` also implements a few methods which are common among all the models, to:

- resize the input token embeddings when new tokens are added to the vocabulary,
- prune the attention heads of the model.

``PreTrainedModel``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.PreTrainedModel
    :members:

``TFPreTrainedModel``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFPreTrainedModel
    :members:
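A minimal sketch of the two common methods listed above (embedding resizing and head pruning), using BERT as an example; the added tokens and the pruning pattern are arbitrary illustrations:

.. code-block:: python

   from transformers import BertModel, BertTokenizer

   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
   model = BertModel.from_pretrained('bert-base-uncased')

   # Resize the input token embeddings after adding new tokens to the vocabulary
   tokenizer.add_tokens(['[NEW_TOKEN_1]', '[NEW_TOKEN_2]'])   # arbitrary example tokens
   model.resize_token_embeddings(len(tokenizer))

   # Prune attention heads 2 and 3 of layer 1 (arbitrary pattern, for illustration only)
   model.prune_heads({1: [2, 3]})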
							
								
								
									
55  docs/source/main_classes/optimizer_schedules.rst  Normal file
@@ -0,0 +1,55 @@

Optimizer
----------------------------------------------------

The ``.optimization`` module provides:

- an optimizer with the weight decay fix (decoupled weight decay) that can be used to fine-tune models, and
- several schedules in the form of schedule objects that inherit from ``_LRSchedule``:

``AdamW``
~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AdamW
    :members:

Schedules
----------------------------------------------------

Learning Rate Schedules
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: transformers.ConstantLRSchedule
    :members:


.. autoclass:: transformers.WarmupConstantSchedule
    :members:

.. image:: /imgs/warmup_constant_schedule.png
    :target: /imgs/warmup_constant_schedule.png
    :alt:


.. autoclass:: transformers.WarmupCosineSchedule
    :members:

.. image:: /imgs/warmup_cosine_schedule.png
    :target: /imgs/warmup_cosine_schedule.png
    :alt:


.. autoclass:: transformers.WarmupCosineWithHardRestartsSchedule
    :members:

.. image:: /imgs/warmup_cosine_hard_restarts_schedule.png
    :target: /imgs/warmup_cosine_hard_restarts_schedule.png
    :alt:


.. autoclass:: transformers.WarmupLinearSchedule
    :members:

.. image:: /imgs/warmup_linear_schedule.png
    :target: /imgs/warmup_linear_schedule.png
    :alt:
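To make the relationship between the optimizer and the schedule objects concrete, here is a minimal fine-tuning-style sketch combining ``AdamW`` with ``WarmupLinearSchedule``. The hyper-parameter values are arbitrary, and ``model`` and ``train_dataloader`` are assumed to come from your own training script:

.. code-block:: python

   from transformers import AdamW, WarmupLinearSchedule

   # Arbitrary illustrative hyper-parameters
   num_total_steps = 1000
   num_warmup_steps = 100

   optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
   scheduler = WarmupLinearSchedule(optimizer, warmup_steps=num_warmup_steps, t_total=num_total_steps)

   for input_ids, labels in train_dataloader:        # assumed to be defined in your training script
       loss = model(input_ids, labels=labels)[0]     # most models return the loss as the first tuple element
       loss.backward()
       optimizer.step()
       scheduler.step()                              # update the learning rate schedule
       optimizer.zero_grad()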
							
								
								
									
58  docs/source/main_classes/processors.rst  Normal file
@@ -0,0 +1,58 @@

Processors
----------------------------------------------------

This library includes processors for several traditional tasks. These processors can be used to process a dataset into
examples that can be fed to a model.

Processors
~~~~~~~~~~~~~~~~~~~~~

All processors follow the same architecture, which is that of the
:class:`~transformers.data.processors.utils.DataProcessor`. The processor returns a list
of :class:`~transformers.data.processors.utils.InputExample`. These
:class:`~transformers.data.processors.utils.InputExample` can be converted to
:class:`~transformers.data.processors.utils.InputFeatures` in order to be fed to the model.

.. autoclass:: transformers.data.processors.utils.DataProcessor
    :members:


.. autoclass:: transformers.data.processors.utils.InputExample
    :members:


.. autoclass:: transformers.data.processors.utils.InputFeatures
    :members:


GLUE
~~~~~~~~~~~~~~~~~~~~~

`General Language Understanding Evaluation (GLUE) <https://gluebenchmark.com/>`__ is a benchmark that evaluates
the performance of models across a diverse set of existing NLU tasks. It was released together with the paper
`GLUE: A multi-task benchmark and analysis platform for natural language understanding <https://openreview.net/pdf?id=rJ4km2R5t7>`__.

This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched),
CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.

Those processors are:

    - :class:`~transformers.data.processors.utils.MrpcProcessor`
    - :class:`~transformers.data.processors.utils.MnliProcessor`
    - :class:`~transformers.data.processors.utils.MnliMismatchedProcessor`
    - :class:`~transformers.data.processors.utils.Sst2Processor`
    - :class:`~transformers.data.processors.utils.StsbProcessor`
    - :class:`~transformers.data.processors.utils.QqpProcessor`
    - :class:`~transformers.data.processors.utils.QnliProcessor`
    - :class:`~transformers.data.processors.utils.RteProcessor`
    - :class:`~transformers.data.processors.utils.WnliProcessor`

Additionally, the following method can be used to load values from a data file and convert them to a list of
:class:`~transformers.data.processors.utils.InputExample`.

.. automethod:: transformers.data.processors.glue.glue_convert_examples_to_features

Example usage
^^^^^^^^^^^^^^^^^^^^^^^^^

An example using these processors is given in the
`run_glue.py <https://github.com/huggingface/pytorch-transformers/blob/master/examples/run_glue.py>`__ script.
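For readers who prefer an inline sketch over the full script, the following outlines the intended flow (processor to ``InputExample`` to ``InputFeatures``). The import path, the ``task`` keyword and the data directory are assumptions for illustration (adjust them to the module layout of your installed version), so treat this as an outline rather than a verified recipe:

.. code-block:: python

   from transformers import BertTokenizer
   # Import path is an assumption; adjust to wherever the processors live in your version
   from transformers.data.processors.glue import MrpcProcessor, glue_convert_examples_to_features

   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
   processor = MrpcProcessor()

   # '/path/to/MRPC' is a placeholder for the directory holding the GLUE MRPC data files
   examples = processor.get_train_examples('/path/to/MRPC')
   label_list = processor.get_labels()

   features = glue_convert_examples_to_features(examples, tokenizer,
                                                max_length=128, task='mrpc')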
							
								
								
									
16  docs/source/main_classes/tokenizer.rst  Normal file
@@ -0,0 +1,16 @@

Tokenizer
----------------------------------------------------

The base class ``PreTrainedTokenizer`` implements the common methods for loading/saving a tokenizer either from a local file or directory, or from a pretrained tokenizer provided by the library (downloaded from HuggingFace's AWS S3 repository).

``PreTrainedTokenizer`` is the main entry point into tokenizers as it also implements the main methods for using all the tokenizers:

- tokenizing, converting tokens to ids and back, and encoding/decoding,
- adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece...),
- managing special tokens (adding them, assigning them to roles, making sure they are not split during tokenization).

``PreTrainedTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.PreTrainedTokenizer
    :members:
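A short sketch of the three groups of methods listed above, using the BERT tokenizer as an example; the sample sentence and the added tokens are arbitrary:

.. code-block:: python

   from transformers import BertTokenizer

   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

   # Tokenizing, converting tokens to ids and back, encoding/decoding
   tokens = tokenizer.tokenize("Hello, Transformers!")
   ids = tokenizer.convert_tokens_to_ids(tokens)
   ids_with_special_tokens = tokenizer.encode("Hello, Transformers!", add_special_tokens=True)
   text = tokenizer.decode(ids_with_special_tokens)

   # Adding new tokens to the vocabulary (arbitrary example tokens)
   num_added = tokenizer.add_tokens(['[NEW_TOKEN_1]', '[NEW_TOKEN_2]'])

   # Special tokens are exposed as attributes and kept intact during tokenization
   print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.unk_token)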
							
								
								
									
109  docs/source/migration.md  Normal file
@@ -0,0 +1,109 @@

# Migrating from pytorch-pretrained-bert

Here is a quick summary of what you should take care of when migrating from `pytorch-pretrained-bert` to `transformers`.

### Models always output `tuples`

The main breaking change when migrating from `pytorch-pretrained-bert` to `transformers` is that every model's forward method always outputs a `tuple` with various elements depending on the model and the configuration parameters.

The exact content of the tuples for each model is detailed in the models' docstrings and the [documentation](https://huggingface.co/transformers/).

In pretty much every case, you will be fine by taking the first element of the output as the output you previously used in `pytorch-pretrained-bert`.

Here is a `pytorch-pretrained-bert` to `transformers` conversion example for a `BertForSequenceClassification` classification model:

```python
# Let's load our model
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')

# If you used to have this line in pytorch-pretrained-bert:
loss = model(input_ids, labels=labels)

# Now just use this line in transformers to extract the loss from the output tuple:
outputs = model(input_ids, labels=labels)
loss = outputs[0]

# In transformers you can also have access to the logits:
loss, logits = outputs[:2]

# And even the attention weights if you configure the model to output them (and other outputs too, see the docstrings and documentation)
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
outputs = model(input_ids, labels=labels)
loss, logits, attentions = outputs
```

### Serialization

There are two breaking changes in the `from_pretrained()` method:

1. Models are now set in evaluation mode by default when instantiated with the `from_pretrained()` method. To train them, don't forget to set them back in training mode (`model.train()`) to activate the dropout modules.

2. The additional `*inputs` and `**kwargs` arguments supplied to the `from_pretrained()` method used to be passed directly to the underlying model's `__init__()` method. They are now used to update the model configuration attributes first, which can break derived model classes built based on the previous `BertForSequenceClassification` examples. More precisely, the positional arguments `*inputs` provided to `from_pretrained()` are directly forwarded to the model's `__init__()` method, while the keyword arguments `**kwargs` (i) that match configuration class attributes are used to update said attributes, and (ii) that don't match any configuration class attribute are forwarded to the model's `__init__()` method.
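To illustrate both points, here is a small sketch; the `num_labels` value is arbitrary and `output_attentions` is just one example of a configuration attribute that can be overridden this way:

```python
# Keyword arguments that match configuration attributes update the configuration:
model = BertForSequenceClassification.from_pretrained('bert-base-uncased',
                                                      num_labels=10,          # arbitrary example value
                                                      output_attentions=True)

# Models are returned in evaluation mode; switch explicitly before training
model.train()
# ... training loop ...
model.eval()
```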

Also, while not a breaking change, the serialization methods have been standardized and you probably should switch to the new method `save_pretrained(save_directory)` if you were using any other serialization method before.

Here is an example:

```python
### Let's load a model and tokenizer
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

### Do some stuff to our model and tokenizer
# Ex: add new tokens to the vocabulary and embeddings of our model
tokenizer.add_tokens(['[SPECIAL_TOKEN_1]', '[SPECIAL_TOKEN_2]'])
model.resize_token_embeddings(len(tokenizer))
# Train our model
train(model)

### Now let's save our model and tokenizer to a directory
model.save_pretrained('./my_saved_model_directory/')
tokenizer.save_pretrained('./my_saved_model_directory/')

### Reload the model and the tokenizer
model = BertForSequenceClassification.from_pretrained('./my_saved_model_directory/')
tokenizer = BertTokenizer.from_pretrained('./my_saved_model_directory/')
```

### Optimizers: BertAdam & OpenAIAdam are now AdamW, schedules are standard PyTorch schedules

The two optimizers previously included, `BertAdam` and `OpenAIAdam`, have been replaced by a single `AdamW` optimizer which has a few differences:

- it only implements the weight decay correction,
- schedules are now external (see below),
- gradient clipping is now also external (see below).

The new `AdamW` optimizer matches the PyTorch `Adam` optimizer API and lets you use standard PyTorch or apex methods for the schedule and clipping.

The schedules are now standard [PyTorch learning rate schedulers](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate) and are not part of the optimizer anymore.

Here is a conversion example from `BertAdam` with a linear warmup and decay schedule to `AdamW` and the same schedule:

```python
# Parameters:
lr = 1e-3
max_grad_norm = 1.0
num_total_steps = 1000
num_warmup_steps = 100
warmup_proportion = float(num_warmup_steps) / float(num_total_steps)  # 0.1

### Previously BertAdam optimizer was instantiated like this:
optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, t_total=num_total_steps)
### and used like this:
for batch in train_data:
    loss = model(batch)
    loss.backward()
    optimizer.step()

### In Transformers, the optimizer and schedules are split and instantiated like this:
optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False)  # To reproduce BertAdam specific behavior set correct_bias=False
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=num_warmup_steps, t_total=num_total_steps)  # PyTorch scheduler
### and used like this:
for batch in train_data:
    loss = model(batch)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)  # Gradient clipping is not in AdamW anymore (so you can use amp without issue)
    scheduler.step()
    optimizer.step()
```
							
								
								
									
29  docs/source/model_doc/auto.rst  Normal file
@@ -0,0 +1,29 @@

AutoModels
-----------

In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you are supplying to the ``from_pretrained`` method.

AutoClasses are here to do this job for you so that you automatically retrieve the relevant model given the name/path to the pretrained weights/config/vocabulary.

Instantiating one of ``AutoModel``, ``AutoConfig`` and ``AutoTokenizer`` will directly create an instance of the relevant architecture (e.g. ``model = AutoModel.from_pretrained('bert-base-cased')`` will create an instance of ``BertModel``).


``AutoConfig``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AutoConfig
    :members:


``AutoModel``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AutoModel
    :members:


``AutoTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.AutoTokenizer
    :members:
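A minimal sketch of the pattern described above; the same three lines work for any supported architecture because the concrete classes are resolved from the pretrained name (here ``bert-base-cased``, as in the example above):

.. code-block:: python

   from transformers import AutoConfig, AutoModel, AutoTokenizer

   # The relevant BERT classes are picked automatically from the model name
   config = AutoConfig.from_pretrained('bert-base-cased')
   tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
   model = AutoModel.from_pretrained('bert-base-cased')   # an instance of BertModel

   print(type(model).__name__)   # -> BertModel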
							
								
								
									
128  docs/source/model_doc/bert.rst  Normal file
@@ -0,0 +1,128 @@

BERT
----------------------------------------------------

``BertConfig``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertConfig
    :members:


``BertTokenizer``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertTokenizer
    :members:


``BertModel``
~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertModel
    :members:


``BertForPreTraining``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForPreTraining
    :members:


``BertForMaskedLM``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForMaskedLM
    :members:


``BertForNextSentencePrediction``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForNextSentencePrediction
    :members:


``BertForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForSequenceClassification
    :members:


``BertForMultipleChoice``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForMultipleChoice
    :members:


``BertForTokenClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForTokenClassification
    :members:


``BertForQuestionAnswering``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.BertForQuestionAnswering
    :members:


``TFBertModel``
~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertModel
    :members:


``TFBertForPreTraining``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForPreTraining
    :members:


``TFBertForMaskedLM``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForMaskedLM
    :members:


``TFBertForNextSentencePrediction``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForNextSentencePrediction
    :members:


``TFBertForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForSequenceClassification
    :members:


``TFBertForMultipleChoice``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForMultipleChoice
    :members:


``TFBertForTokenClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForTokenClassification
    :members:


``TFBertForQuestionAnswering``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFBertForQuestionAnswering
    :members:
							
								
								
									
										70
									
								
docs/source/model_doc/distilbert.rst (new file)
@@ -0,0 +1,70 @@

DistilBERT
----------------------------------------------------

``DistilBertConfig``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DistilBertConfig
    :members:


``DistilBertTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DistilBertTokenizer
    :members:


``DistilBertModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DistilBertModel
    :members:


``DistilBertForMaskedLM``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DistilBertForMaskedLM
    :members:


``DistilBertForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DistilBertForSequenceClassification
    :members:


``DistilBertForQuestionAnswering``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.DistilBertForQuestionAnswering
    :members:


``TFDistilBertModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFDistilBertModel
    :members:


``TFDistilBertForMaskedLM``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFDistilBertForMaskedLM
    :members:


``TFDistilBertForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFDistilBertForSequenceClassification
    :members:


``TFDistilBertForQuestionAnswering``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFDistilBertForQuestionAnswering
    :members:
docs/source/model_doc/gpt.rst (new file)
@@ -0,0 +1,57 @@

OpenAI GPT
----------------------------------------------------

``OpenAIGPTConfig``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.OpenAIGPTConfig
    :members:


``OpenAIGPTTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.OpenAIGPTTokenizer
    :members:


``OpenAIGPTModel``
~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.OpenAIGPTModel
    :members:


``OpenAIGPTLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.OpenAIGPTLMHeadModel
    :members:


``OpenAIGPTDoubleHeadsModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.OpenAIGPTDoubleHeadsModel
    :members:


``TFOpenAIGPTModel``
~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFOpenAIGPTModel
    :members:


``TFOpenAIGPTLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFOpenAIGPTLMHeadModel
    :members:


``TFOpenAIGPTDoubleHeadsModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFOpenAIGPTDoubleHeadsModel
    :members:
docs/source/model_doc/gpt2.rst (new file)
@@ -0,0 +1,57 @@

OpenAI GPT2
----------------------------------------------------

``GPT2Config``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2Config
    :members:


``GPT2Tokenizer``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2Tokenizer
    :members:


``GPT2Model``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2Model
    :members:


``GPT2LMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2LMHeadModel
    :members:


``GPT2DoubleHeadsModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.GPT2DoubleHeadsModel
    :members:


``TFGPT2Model``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFGPT2Model
    :members:


``TFGPT2LMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFGPT2LMHeadModel
    :members:


``TFGPT2DoubleHeadsModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFGPT2DoubleHeadsModel
    :members:
docs/source/model_doc/roberta.rst (new file)
@@ -0,0 +1,57 @@

RoBERTa
----------------------------------------------------

``RobertaConfig``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.RobertaConfig
    :members:


``RobertaTokenizer``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.RobertaTokenizer
    :members:


``RobertaModel``
~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.RobertaModel
    :members:


``RobertaForMaskedLM``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.RobertaForMaskedLM
    :members:


``RobertaForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.RobertaForSequenceClassification
    :members:


``TFRobertaModel``
~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFRobertaModel
    :members:


``TFRobertaForMaskedLM``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFRobertaForMaskedLM
    :members:


``TFRobertaForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFRobertaForSequenceClassification
    :members:
docs/source/model_doc/transformerxl.rst (new file)
@@ -0,0 +1,44 @@

Transformer XL
----------------------------------------------------

``TransfoXLConfig``
~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TransfoXLConfig
    :members:


``TransfoXLTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TransfoXLTokenizer
    :members:


``TransfoXLModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TransfoXLModel
    :members:


``TransfoXLLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TransfoXLLMHeadModel
    :members:


``TFTransfoXLModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFTransfoXLModel
    :members:


``TFTransfoXLLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFTransfoXLLMHeadModel
    :members:
docs/source/model_doc/xlm.rst (new file)
@@ -0,0 +1,69 @@

XLM
----------------------------------------------------

``XLMConfig``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLMConfig
    :members:


``XLMTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLMTokenizer
    :members:


``XLMModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLMModel
    :members:


``XLMWithLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLMWithLMHeadModel
    :members:


``XLMForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLMForSequenceClassification
    :members:


``XLMForQuestionAnswering``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLMForQuestionAnswering
    :members:


``TFXLMModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLMModel
    :members:


``TFXLMWithLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLMWithLMHeadModel
    :members:


``TFXLMForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLMForSequenceClassification
    :members:


``TFXLMForQuestionAnsweringSimple``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLMForQuestionAnsweringSimple
    :members:
docs/source/model_doc/xlnet.rst (new file)
@@ -0,0 +1,71 @@

XLNet
----------------------------------------------------

``XLNetConfig``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLNetConfig
    :members:


``XLNetTokenizer``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLNetTokenizer
    :members:


``XLNetModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLNetModel
    :members:


``XLNetLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLNetLMHeadModel
    :members:


``XLNetForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLNetForSequenceClassification
    :members:


``XLNetForQuestionAnswering``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.XLNetForQuestionAnswering
    :members:


``TFXLNetModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLNetModel
    :members:


``TFXLNetLMHeadModel``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLNetLMHeadModel
    :members:


``TFXLNetForSequenceClassification``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLNetForSequenceClassification
    :members:


``TFXLNetForQuestionAnsweringSimple``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autoclass:: transformers.TFXLNetForQuestionAnsweringSimple
    :members:
docs/source/notebooks.rst (new file)
@@ -0,0 +1,16 @@

Notebooks
================================================

We include `three Jupyter Notebooks <https://github.com/huggingface/transformers/tree/master/notebooks>`_ that can be used to check that the predictions of the PyTorch model are identical to the predictions of the original TensorFlow model.

*
  The first notebook (\ `Comparing-TF-and-PT-models.ipynb <https://github.com/huggingface/transformers/blob/master/notebooks/Comparing-TF-and-PT-models.ipynb>`_\ ) extracts the hidden states of a full sequence on each layer of the TensorFlow and the PyTorch models and computes the standard deviation between them. In the given example, we get a standard deviation of 1.5e-7 to 9e-7 on the various hidden states of the models.

*
  The second notebook (\ `Comparing-TF-and-PT-models-SQuAD.ipynb <https://github.com/huggingface/transformers/blob/master/notebooks/Comparing-TF-and-PT-models-SQuAD.ipynb>`_\ ) compares the loss computed by the TensorFlow and the PyTorch models for identical initialization of the fine-tuning layer of ``BertForQuestionAnswering`` and computes the standard deviation between them. In the given example, we get a standard deviation of 2.5e-7 between the models.

*
  The third notebook (\ `Comparing-TF-and-PT-models-MLM-NSP.ipynb <https://github.com/huggingface/transformers/blob/master/notebooks/Comparing-TF-and-PT-models-MLM-NSP.ipynb>`_\ ) compares the predictions computed by the TensorFlow and the PyTorch models for masked token language modeling using the pre-trained masked language modeling model.

Please follow the instructions given in the notebooks to run and modify them.
docs/source/pretrained_models.rst (new file)
@@ -0,0 +1,123 @@

Pretrained models
================================================

Here is the full list of the currently provided pretrained models, together with a short presentation of each model; a brief usage sketch follows the table.

.. list-table::
   :header-rows: 1
   :widths: 15 35 50

   * - Architecture
     - Shortcut name
     - Details of the model
   * - BERT
     - ``bert-base-uncased``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on lower-cased English text.
   * - BERT
     - ``bert-large-uncased``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. Trained on lower-cased English text.
   * - BERT
     - ``bert-base-cased``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on cased English text.
   * - BERT
     - ``bert-large-cased``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. Trained on cased English text.
   * - BERT
     - ``bert-base-multilingual-uncased``
     - (Original, not recommended) 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on lower-cased text in the top 102 languages with the largest Wikipedias (see `details <https://github.com/google-research/bert/blob/master/multilingual.md>`__).
   * - BERT
     - ``bert-base-multilingual-cased``
     - (New, **recommended**) 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on cased text in the top 104 languages with the largest Wikipedias (see `details <https://github.com/google-research/bert/blob/master/multilingual.md>`__).
   * - BERT
     - ``bert-base-chinese``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on cased Chinese Simplified and Traditional text.
   * - BERT
     - ``bert-base-german-cased``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on cased German text by Deepset.ai (see `details on deepset.ai website <https://deepset.ai/german-bert>`__).
   * - BERT
     - ``bert-large-uncased-whole-word-masking``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. Trained on lower-cased English text using Whole-Word-Masking (see `details <https://github.com/google-research/bert/#bert>`__).
   * - BERT
     - ``bert-large-cased-whole-word-masking``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. Trained on cased English text using Whole-Word-Masking (see `details <https://github.com/google-research/bert/#bert>`__).
   * - BERT
     - ``bert-large-uncased-whole-word-masking-finetuned-squad``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. The ``bert-large-uncased-whole-word-masking`` model fine-tuned on SQuAD (see details of fine-tuning in the `example section <https://github.com/huggingface/transformers/tree/master/examples>`__).
   * - BERT
     - ``bert-large-cased-whole-word-masking-finetuned-squad``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. The ``bert-large-cased-whole-word-masking`` model fine-tuned on SQuAD (see `details of fine-tuning in the example section <https://huggingface.co/transformers/examples.html>`__).
   * - BERT
     - ``bert-base-cased-finetuned-mrpc``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. The ``bert-base-cased`` model fine-tuned on MRPC (see `details of fine-tuning in the example section <https://huggingface.co/transformers/examples.html>`__).
   * - GPT
     - ``openai-gpt``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. OpenAI GPT English model.
   * - GPT-2
     - ``gpt2``
     - 12-layer, 768-hidden, 12-heads, 117M parameters. OpenAI GPT-2 English model.
   * - GPT-2
     - ``gpt2-medium``
     - 24-layer, 1024-hidden, 16-heads, 345M parameters. OpenAI's Medium-sized GPT-2 English model.
   * - GPT-2
     - ``gpt2-large``
     - 36-layer, 1280-hidden, 20-heads, 774M parameters. OpenAI's Large-sized GPT-2 English model.
   * - Transformer-XL
     - ``transfo-xl-wt103``
     - 18-layer, 1024-hidden, 16-heads, 257M parameters. English model trained on wikitext-103.
   * - XLNet
     - ``xlnet-base-cased``
     - 12-layer, 768-hidden, 12-heads, 110M parameters. XLNet English model.
   * - XLNet
     - ``xlnet-large-cased``
     - 24-layer, 1024-hidden, 16-heads, 340M parameters. XLNet Large English model.
   * - XLM
     - ``xlm-mlm-en-2048``
     - 12-layer, 2048-hidden, 16-heads. XLM English model.
   * - XLM
     - ``xlm-mlm-ende-1024``
     - 6-layer, 1024-hidden, 8-heads. XLM English-German model trained on the concatenation of English and German wikipedia.
   * - XLM
     - ``xlm-mlm-enfr-1024``
     - 6-layer, 1024-hidden, 8-heads. XLM English-French model trained on the concatenation of English and French wikipedia.
   * - XLM
     - ``xlm-mlm-enro-1024``
     - 6-layer, 1024-hidden, 8-heads. XLM English-Romanian Multi-language model.
   * - XLM
     - ``xlm-mlm-xnli15-1024``
     - 12-layer, 1024-hidden, 8-heads. XLM model pre-trained with MLM on the `15 XNLI languages <https://github.com/facebookresearch/XNLI>`__.
   * - XLM
     - ``xlm-mlm-tlm-xnli15-1024``
     - 12-layer, 1024-hidden, 8-heads. XLM model pre-trained with MLM + TLM on the `15 XNLI languages <https://github.com/facebookresearch/XNLI>`__.
   * - XLM
     - ``xlm-clm-enfr-1024``
     - 6-layer, 1024-hidden, 8-heads. XLM English-French model trained with CLM (Causal Language Modeling) on the concatenation of English and French wikipedia.
   * - XLM
     - ``xlm-clm-ende-1024``
     - 6-layer, 1024-hidden, 8-heads. XLM English-German model trained with CLM (Causal Language Modeling) on the concatenation of English and German wikipedia.
   * - RoBERTa
     - ``roberta-base``
     - 12-layer, 768-hidden, 12-heads, 125M parameters. RoBERTa using the BERT-base architecture (see `details <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`__).
   * - RoBERTa
     - ``roberta-large``
     - 24-layer, 1024-hidden, 16-heads, 355M parameters. RoBERTa using the BERT-large architecture (see `details <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`__).
   * - RoBERTa
     - ``roberta-large-mnli``
     - 24-layer, 1024-hidden, 16-heads, 355M parameters. ``roberta-large`` fine-tuned on `MNLI <http://www.nyu.edu/projects/bowman/multinli/>`__ (see `details <https://github.com/pytorch/fairseq/tree/master/examples/roberta>`__).
   * - DistilBERT
     - ``distilbert-base-uncased``
     - 6-layer, 768-hidden, 12-heads, 66M parameters. The DistilBERT model distilled from the BERT model ``bert-base-uncased`` checkpoint (see `details <https://medium.com/huggingface/distilbert-8cf3380435b5>`__).
   * - DistilBERT
     - ``distilbert-base-uncased-distilled-squad``
     - 6-layer, 768-hidden, 12-heads, 66M parameters. The DistilBERT model distilled from the BERT model ``bert-base-uncased`` checkpoint, with an additional linear layer (see `details <https://medium.com/huggingface/distilbert-8cf3380435b5>`__).
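As a brief, hedged usage sketch (not part of the table above), any shortcut name listed here can be passed to the ``from_pretrained()`` methods; ``distilbert-base-uncased`` is used below purely as an arbitrary example and the weights are downloaded on first use:

.. code-block:: python

    from transformers import AutoTokenizer, AutoModel

    # Replace the shortcut name with any entry from the table above
    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
    model = AutoModel.from_pretrained('distilbert-base-uncased')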
docs/source/quickstart.md (new file)
@@ -0,0 +1,190 @@
# Quickstart

## Philosophy

Transformers is an opinionated library built for NLP researchers seeking to use/study/extend large-scale transformer models.

The library was designed with two strong goals in mind:

- be as easy and fast to use as possible:

  - we strongly limited the number of user-facing abstractions to learn; in fact, there are almost no abstractions, just three standard classes required to use each model: configuration, model and tokenizer,
  - all of these classes can be initialized in a simple and unified way from pretrained instances by using a common `from_pretrained()` instantiation method which takes care of downloading (if needed), caching and loading the related class from a pretrained instance supplied with the library or your own saved instance.
  - as a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to extend/build upon the library, just use regular Python/PyTorch modules and inherit from the base classes of the library to reuse functionalities like model loading/saving.

- provide state-of-the-art models with performances as close as possible to the original models:

  - we provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture,
  - the code is usually as close to the original code base as possible, which means some PyTorch code may not be as *pytorchic* as it could be as a result of being converted from TensorFlow code.

A few other goals:

- expose the models' internals as consistently as possible:

  - we give access, using a single API, to the full hidden states and attention weights,
  - tokenizer and base model APIs are standardized to easily switch between models.

- incorporate a subjective selection of promising tools for fine-tuning/investigating these models:

  - a simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning,
  - simple ways to mask and prune transformer heads.
## Main concepts

The library is built around three types of classes for each model:

- **model classes**, which are PyTorch models (`torch.nn.Module`) of the 6 model architectures currently provided in the library, e.g. `BertModel`
- **configuration classes**, which store all the parameters required to build a model, e.g. `BertConfig`. You don't always need to instantiate these yourself; in particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model)
- **tokenizer classes**, which store the vocabulary for each model and provide methods for encoding/decoding strings into lists of token embedding indices to be fed to a model, e.g. `BertTokenizer`

All these classes can be instantiated from pretrained instances and saved locally using two methods (a short round-trip sketch follows this list):

- `from_pretrained()` lets you instantiate a model/configuration/tokenizer from a pretrained version either provided by the library itself (currently 27 models are provided as listed [here](https://huggingface.co/transformers/pretrained_models.html)) or stored locally (or on a server) by the user,
- `save_pretrained()` lets you save a model/configuration/tokenizer locally so that it can be reloaded using `from_pretrained()`.
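As a minimal sketch of that round trip (not part of the original text; `./my_bert_checkpoint/` is just a hypothetical local directory):

```python
import os
from transformers import BertModel, BertTokenizer

save_dir = './my_bert_checkpoint/'   # hypothetical local path
os.makedirs(save_dir, exist_ok=True)

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

# save_pretrained() writes the weights, the configuration and the vocabulary files to the directory
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)

# from_pretrained() can then reload everything from that directory instead of a shortcut name
model = BertModel.from_pretrained(save_dir)
tokenizer = BertTokenizer.from_pretrained(save_dir)
```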
We'll finish this quickstart tour by going through a few simple quick-start examples to see how we can instantiate and use these classes. The rest of the documentation is organized in two parts:

- the **MAIN CLASSES** section details the common functionalities/methods/attributes of the three main types of classes (configuration, model, tokenizer) plus some optimization-related classes provided as utilities for training,
- the **PACKAGE REFERENCE** section details all the variants of each class for each model architecture and, in particular, the inputs/outputs that you should expect when calling each of them.

## Quick tour: Usage

Here are two examples showcasing a few `Bert` and `GPT2` classes and pre-trained models.

See the full API reference for examples for each model class.

### BERT example

Let's start by preparing a tokenized input (a list of token embedding indices to be fed to Bert) from a text string using `BertTokenizer`:
```python
import torch
from transformers import BertTokenizer, BertModel, BertForMaskedLM

# OPTIONAL: if you want to have more information on what's happening under the hood, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)

# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Tokenize input
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = tokenizer.tokenize(text)

# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = '[MASK]'
assert tokenized_text == ['[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]', 'jim', '[MASK]', 'was', 'a', 'puppet', '##eer', '[SEP]']

# Convert tokens to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
```
Let's see how we can use `BertModel` to encode our inputs in hidden states:

```python
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-uncased')

# Set the model in evaluation mode to deactivate the DropOut modules
# This is IMPORTANT to have reproducible results during evaluation!
model.eval()

# If you have a GPU, put everything on cuda
tokens_tensor = tokens_tensor.to('cuda')
segments_tensors = segments_tensors.to('cuda')
model.to('cuda')

# Predict hidden states features for each layer
with torch.no_grad():
    # See the models' docstrings for the detail of the inputs
    outputs = model(tokens_tensor, token_type_ids=segments_tensors)
    # Transformers models always output tuples.
    # See the models' docstrings for the detail of all the outputs
    # In our case, the first element is the hidden state of the last layer of the Bert model
    encoded_layers = outputs[0]
# We have encoded our input sequence in a FloatTensor of shape (batch size, sequence length, model hidden dimension)
assert tuple(encoded_layers.shape) == (1, len(indexed_tokens), model.config.hidden_size)
```
And here is how to use `BertForMaskedLM` to predict a masked token:

```python
# Load pre-trained model (weights)
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
model.eval()

# If you have a GPU, put everything on cuda
tokens_tensor = tokens_tensor.to('cuda')
segments_tensors = segments_tensors.to('cuda')
model.to('cuda')

# Predict all tokens
with torch.no_grad():
    outputs = model(tokens_tensor, token_type_ids=segments_tensors)
    predictions = outputs[0]

# Confirm we were able to predict 'henson'
predicted_index = torch.argmax(predictions[0, masked_index]).item()
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
assert predicted_token == 'henson'
```
 | 
			
		||||
 | 
			
		||||
### OpenAI GPT-2
 | 
			
		||||
 | 
			
		||||
Here is a quick-start example using the `GPT2Tokenizer` and `GPT2LMHeadModel` classes with OpenAI's pre-trained model to predict the next token from a text prompt.
 | 
			
		||||
 | 
			
		||||
First, let's prepare a tokenized input from our text string using `GPT2Tokenizer`.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
 | 
			
		||||
from transformers import GPT2Tokenizer, GPT2LMHeadModel
 | 
			
		||||
 | 
			
		||||
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
 | 
			
		||||
import logging
 | 
			
		||||
logging.basicConfig(level=logging.INFO)
 | 
			
		||||
 | 
			
		||||
# Load pre-trained model tokenizer (vocabulary)
 | 
			
		||||
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 | 
			
		||||
 | 
			
		||||
# Encode a text input
 | 
			
		||||
text = "Who was Jim Henson ? Jim Henson was a"
 | 
			
		||||
indexed_tokens = tokenizer.encode(text)
 | 
			
		||||
 | 
			
		||||
# Convert indexed tokens to a PyTorch tensor
 | 
			
		||||
tokens_tensor = torch.tensor([indexed_tokens])
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Let's see how to use `GPT2LMHeadModel` to generate the next token following our text:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
# Load pre-trained model (weights)
 | 
			
		||||
model = GPT2LMHeadModel.from_pretrained('gpt2')
 | 
			
		||||
 | 
			
		||||
# Set the model in evaluation mode to deactivate the DropOut modules
 | 
			
		||||
# This is IMPORTANT to have reproducible results during evaluation!
 | 
			
		||||
model.eval()
 | 
			
		||||
 | 
			
		||||
# If you have a GPU, put everything on cuda
 | 
			
		||||
tokens_tensor = tokens_tensor.to('cuda')
 | 
			
		||||
model.to('cuda')
 | 
			
		||||
 | 
			
		||||
# Predict all tokens
 | 
			
		||||
with torch.no_grad():
 | 
			
		||||
    outputs = model(tokens_tensor)
 | 
			
		||||
    predictions = outputs[0]
 | 
			
		||||
 | 
			
		||||
# get the predicted next sub-word (in our case, the word 'man')
 | 
			
		||||
predicted_index = torch.argmax(predictions[0, -1, :]).item()
 | 
			
		||||
predicted_text = tokenizer.decode(indexed_tokens + [predicted_index])
 | 
			
		||||
assert predicted_text == 'Who was Jim Henson? Jim Henson was a man'
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Examples for each model class of each model architecture (Bert, GPT, GPT-2, Transformer-XL, XLNet and XLM) can be found in the [documentation](#documentation).
 | 
			
		||||
							
								
								
									
docs/source/serialization.rst (new file)
@ -0,0 +1,188 @@
 | 
			
		||||
Loading Google AI or OpenAI pre-trained weights or PyTorch dump
 | 
			
		||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 | 
			
		||||
 | 
			
		||||
``from_pretrained()`` method
 | 
			
		||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 | 
			
		||||
 | 
			
		||||
To load one of Google AI's or OpenAI's pre-trained models, or a PyTorch saved model (an instance of ``BertForPreTraining`` saved with ``torch.save()``\ ), the PyTorch model classes and the tokenizer can be instantiated using the ``from_pretrained()`` method:
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
   model = BERT_CLASS.from_pretrained(PRE_TRAINED_MODEL_NAME_OR_PATH, cache_dir=None, from_tf=False, state_dict=None, *input, **kwargs)
 | 
			
		||||
 | 
			
		||||
where
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* ``BERT_CLASS`` is either a tokenizer to load the vocabulary (\ ``BertTokenizer`` or ``OpenAIGPTTokenizer`` classes) or one of the eight BERT or three OpenAI GPT PyTorch model classes (to load the pre-trained weights): ``BertModel``\ , ``BertForMaskedLM``\ , ``BertForNextSentencePrediction``\ , ``BertForPreTraining``\ , ``BertForSequenceClassification``\ , ``BertForTokenClassification``\ , ``BertForMultipleChoice``\ , ``BertForQuestionAnswering``\ , ``OpenAIGPTModel``\ , ``OpenAIGPTLMHeadModel`` or ``OpenAIGPTDoubleHeadsModel``\ , and
 | 
			
		||||
*
 | 
			
		||||
  ``PRE_TRAINED_MODEL_NAME_OR_PATH`` is either:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  *
 | 
			
		||||
    the shortcut name of a Google AI's or OpenAI's pre-trained model selected in the list:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    * ``bert-base-uncased``: 12-layer, 768-hidden, 12-heads, 110M parameters
 | 
			
		||||
    * ``bert-large-uncased``: 24-layer, 1024-hidden, 16-heads, 340M parameters
 | 
			
		||||
    * ``bert-base-cased``: 12-layer, 768-hidden, 12-heads, 110M parameters
 | 
			
		||||
    * ``bert-large-cased``: 24-layer, 1024-hidden, 16-heads, 340M parameters
 | 
			
		||||
    * ``bert-base-multilingual-uncased``: (Orig, not recommended) 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
 | 
			
		||||
    * ``bert-base-multilingual-cased``: **(New, recommended)** 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
 | 
			
		||||
    * ``bert-base-chinese``: Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters
 | 
			
		||||
    * ``bert-base-german-cased``: Trained on German data only, 12-layer, 768-hidden, 12-heads, 110M parameters `Performance Evaluation <https://deepset.ai/german-bert>`__
 | 
			
		||||
    * ``bert-large-uncased-whole-word-masking``: 24-layer, 1024-hidden, 16-heads, 340M parameters - Trained with Whole Word Masking (mask all of the tokens corresponding to a word at once)
 | 
			
		||||
    * ``bert-large-cased-whole-word-masking``: 24-layer, 1024-hidden, 16-heads, 340M parameters - Trained with Whole Word Masking (mask all of the tokens corresponding to a word at once)
 | 
			
		||||
    * ``bert-large-uncased-whole-word-masking-finetuned-squad``: The ``bert-large-uncased-whole-word-masking`` model finetuned on SQuAD (using the ``run_bert_squad.py`` examples). Results: *exact_match: 86.91579943235573, f1: 93.1532499015869*
 | 
			
		||||
    * ``openai-gpt``: OpenAI GPT English model, 12-layer, 768-hidden, 12-heads, 110M parameters
 | 
			
		||||
    * ``gpt2``: OpenAI GPT-2 English model, 12-layer, 768-hidden, 12-heads, 117M parameters
 | 
			
		||||
    * ``gpt2-medium``: OpenAI GPT-2 English model, 24-layer, 1024-hidden, 16-heads, 345M parameters
 | 
			
		||||
    * ``transfo-xl-wt103``: Transformer-XL English model trained on wikitext-103, 18-layer, 1024-hidden, 16-heads, 257M parameters
 | 
			
		||||
 | 
			
		||||
  *
 | 
			
		||||
    a path or url to a pretrained model archive containing:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    * ``bert_config.json`` or ``openai_gpt_config.json`` a configuration file for the model, and
 | 
			
		||||
    * ``pytorch_model.bin`` a PyTorch dump of a pre-trained instance of ``BertForPreTraining``\ , ``OpenAIGPTModel``\ , ``TransfoXLModel``\ , ``GPT2LMHeadModel`` (saved with the usual ``torch.save()``\ )
 | 
			
		||||
 | 
			
		||||
  If ``PRE_TRAINED_MODEL_NAME_OR_PATH`` is a shortcut name, the pre-trained weights will be downloaded from AWS S3 (see the links `here <https://github.com/huggingface/transformers/blob/master/transformers/modeling_bert.py>`__\ ) and stored in a cache folder to avoid future download (the cache folder can be found at ``~/.pytorch_pretrained_bert/``\ ).
 | 
			
		||||
 | 
			
		||||
*
 | 
			
		||||
  ``cache_dir`` can be an optional path to a specific directory to download and cache the pre-trained model weights. This option is useful in particular when you are using distributed training: to avoid concurrent access to the same weights you can set for example ``cache_dir='./pretrained_model_{}'.format(args.local_rank)`` (see the section on distributed training for more information).
 | 
			
		||||
 | 
			
		||||
* ``from_tf``\ : whether to load the weights from a locally saved TensorFlow checkpoint
 | 
			
		||||
* ``state_dict``\ : an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
 | 
			
		||||
* ``*inputs``\ , ``**kwargs``: additional inputs for the specific Bert class (e.g. ``num_labels`` for ``BertForSequenceClassification``)
 | 
			
		||||
 | 
			
		||||
``Uncased`` means that the text has been lowercased before WordPiece tokenization, e.g., ``John Smith`` becomes ``john smith``. The Uncased model also strips out any accent markers. ``Cased`` means that the true case and accent markers are preserved. Typically, the Uncased model is better unless you know that case information is important for your task (e.g., Named Entity Recognition or Part-of-Speech tagging). For information about the Multilingual and Chinese model, see the `Multilingual README <https://github.com/google-research/bert/blob/master/multilingual.md>`__ or the original TensorFlow repository.
 | 
			
		||||
 | 
			
		||||
When using an ``uncased`` model, make sure to pass ``--do_lower_case`` to the example training scripts (or pass ``do_lower_case=True`` to ``FullTokenizer`` if you're using your own script and loading the tokenizer yourself).
 | 
			
		||||
 | 
			
		||||
Examples:
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
   # BERT
 | 
			
		||||
   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, do_basic_tokenize=True)
 | 
			
		||||
   model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
 | 
			
		||||
 | 
			
		||||
   # OpenAI GPT
 | 
			
		||||
   tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
 | 
			
		||||
   model = OpenAIGPTModel.from_pretrained('openai-gpt')
 | 
			
		||||
 | 
			
		||||
   # Transformer-XL
 | 
			
		||||
   tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
 | 
			
		||||
   model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
 | 
			
		||||
 | 
			
		||||
   # OpenAI GPT-2
 | 
			
		||||
   tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 | 
			
		||||
   model = GPT2Model.from_pretrained('gpt2')
 | 
			
		||||
 | 
			
		||||
Cache directory
 | 
			
		||||
~~~~~~~~~~~~~~~
 | 
			
		||||
 | 
			
		||||
``pytorch_pretrained_bert`` saves the pretrained weights in a cache directory which is located at (in this order of priority):
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* the optional ``cache_dir`` argument to the ``from_pretrained()`` method (see above),
 | 
			
		||||
* shell environment variable ``PYTORCH_PRETRAINED_BERT_CACHE``\ ,
 | 
			
		||||
* PyTorch cache home + ``/pytorch_pretrained_bert/``
 | 
			
		||||
  where PyTorch cache home is defined by (in this order):
 | 
			
		||||
 | 
			
		||||
  * shell environment variable ``ENV_TORCH_HOME``
 | 
			
		||||
  * shell environment variable ``ENV_XDG_CACHE_HOME`` + ``/torch/``
 | 
			
		||||
  * default: ``~/.cache/torch/``
 | 
			
		||||
 | 
			
		||||
Usually, if you don't set any specific environment variable, ``pytorch_pretrained_bert`` cache will be at ``~/.cache/torch/pytorch_pretrained_bert/``.
 | 
			
		||||
 | 
			
		||||
You can always safely delete the ``pytorch_pretrained_bert`` cache, but the pretrained model weights and vocabulary files will have to be re-downloaded from our S3.
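
For example, here is a minimal sketch of redirecting the cache (the paths below are purely illustrative, and the environment variable must be set before the library is imported, e.g. in your shell):

.. code-block:: python

   import os

   # Option 1: redirect the whole cache through the environment variable
   # (it has to be set before pytorch_pretrained_bert / transformers is imported)
   os.environ['PYTORCH_PRETRAINED_BERT_CACHE'] = '/mnt/shared/bert_cache'  # illustrative path

   from transformers import BertModel

   # Option 2: use a dedicated cache directory for a single call, e.g. one
   # directory per process when running distributed training (see above)
   model = BertModel.from_pretrained('bert-base-uncased',
                                     cache_dir='./pretrained_model_0')  # illustrative path
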
 | 
			
		||||
 | 
			
		||||
Serialization best-practices
 | 
			
		||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 | 
			
		||||
 | 
			
		||||
This section explains how you can save and re-load a fine-tuned model (BERT, GPT, GPT-2 and Transformer-XL).
 | 
			
		||||
There are three types of files you need to save to be able to reload a fine-tuned model:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* the model itself, which should be saved following PyTorch serialization `best practices <https://pytorch.org/docs/stable/notes/serialization.html#best-practices>`__\ ,
 | 
			
		||||
* the configuration file of the model which is saved as a JSON file, and
 | 
			
		||||
* the vocabulary (and the merges for the BPE-based models GPT and GPT-2).
 | 
			
		||||
 | 
			
		||||
The *default filenames* of these files are as follows:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
* the model weights file: ``pytorch_model.bin``\ ,
 | 
			
		||||
* the configuration file: ``config.json``\ ,
 | 
			
		||||
* the vocabulary file: ``vocab.txt`` for BERT and Transformer-XL, ``vocab.json`` for GPT/GPT-2 (BPE vocabulary),
 | 
			
		||||
* for GPT/GPT-2 (BPE vocabulary) the additional merges file: ``merges.txt``.
 | 
			
		||||
 | 
			
		||||
**If you save a model using these *default filenames*\ , you can then re-load the model and tokenizer using the ``from_pretrained()`` method.**
 | 
			
		||||
 | 
			
		||||
Here is the recommended way of saving the model, configuration and vocabulary to an ``output_dir`` directory and reloading the model and tokenizer afterwards:
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
   from transformers import WEIGHTS_NAME, CONFIG_NAME
 | 
			
		||||
 | 
			
		||||
   output_dir = "./models/"
 | 
			
		||||
 | 
			
		||||
   # Step 1: Save a model, configuration and vocabulary that you have fine-tuned
 | 
			
		||||
 | 
			
		||||
   # If we have a distributed model, save only the encapsulated model
 | 
			
		||||
   # (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
 | 
			
		||||
   model_to_save = model.module if hasattr(model, 'module') else model
 | 
			
		||||
 | 
			
		||||
   # If we save using the predefined names, we can load using `from_pretrained`
 | 
			
		||||
   output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
 | 
			
		||||
   output_config_file = os.path.join(output_dir, CONFIG_NAME)
 | 
			
		||||
 | 
			
		||||
   torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
   model_to_save.config.to_json_file(output_config_file)
 | 
			
		||||
   tokenizer.save_vocabulary(output_dir)
 | 
			
		||||
 | 
			
		||||
   # Step 2: Re-load the saved model and vocabulary
 | 
			
		||||
 | 
			
		||||
   # Example for a Bert model
 | 
			
		||||
   model = BertForQuestionAnswering.from_pretrained(output_dir)
 | 
			
		||||
   tokenizer = BertTokenizer.from_pretrained(output_dir, do_lower_case=args.do_lower_case)  # Add specific options if needed
 | 
			
		||||
   # Example for a GPT model
 | 
			
		||||
   model = OpenAIGPTDoubleHeadsModel.from_pretrained(output_dir)
 | 
			
		||||
   tokenizer = OpenAIGPTTokenizer.from_pretrained(output_dir)
 | 
			
		||||
 | 
			
		||||
Here is another way you can save and reload the model if you want to use specific paths for each type of file:
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
   output_model_file = "./models/my_own_model_file.bin"
 | 
			
		||||
   output_config_file = "./models/my_own_config_file.bin"
 | 
			
		||||
   output_vocab_file = "./models/my_own_vocab_file.bin"
 | 
			
		||||
 | 
			
		||||
   # Step 1: Save a model, configuration and vocabulary that you have fine-tuned
 | 
			
		||||
 | 
			
		||||
   # If we have a distributed model, save only the encapsulated model
 | 
			
		||||
   # (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
 | 
			
		||||
   model_to_save = model.module if hasattr(model, 'module') else model
 | 
			
		||||
 | 
			
		||||
   torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
   model_to_save.config.to_json_file(output_config_file)
 | 
			
		||||
   tokenizer.save_vocabulary(output_vocab_file)
 | 
			
		||||
 | 
			
		||||
   # Step 2: Re-load the saved model and vocabulary
 | 
			
		||||
 | 
			
		||||
   # We didn't save using the predefined WEIGHTS_NAME, CONFIG_NAME names, we cannot load using `from_pretrained`.
 | 
			
		||||
   # Here is how to do it in this situation:
 | 
			
		||||
 | 
			
		||||
   # Example for a Bert model
 | 
			
		||||
   config = BertConfig.from_json_file(output_config_file)
 | 
			
		||||
   model = BertForQuestionAnswering(config)
 | 
			
		||||
   state_dict = torch.load(output_model_file)
 | 
			
		||||
   model.load_state_dict(state_dict)
 | 
			
		||||
   tokenizer = BertTokenizer(output_vocab_file, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
   # Example for a GPT model
 | 
			
		||||
   config = OpenAIGPTConfig.from_json_file(output_config_file)
 | 
			
		||||
   model = OpenAIGPTDoubleHeadsModel(config)
 | 
			
		||||
   state_dict = torch.load(output_model_file)
 | 
			
		||||
   model.load_state_dict(state_dict)
 | 
			
		||||
   tokenizer = OpenAIGPTTokenizer(output_vocab_file)
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docs/source/torchscript.rst (new file)
@ -0,0 +1,135 @@
 | 
			
		||||
TorchScript
 | 
			
		||||
================================================
 | 
			
		||||
 | 
			
		||||
.. note::
 | 
			
		||||
    This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities
 | 
			
		||||
    with variable-input-size models. It is a focus of interest to us and we will deepen our analysis in upcoming
 | 
			
		||||
    releases, with more code examples, a more flexible implementation, and benchmarks comparing python-based codes
 | 
			
		||||
    with compiled TorchScript.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
According to Pytorch's documentation: "TorchScript is a way to create serializable and optimizable models from PyTorch code".
 | 
			
		||||
Pytorch's two modules `JIT and TRACE <https://pytorch.org/docs/stable/jit.html>`_ allow the developer to export
 | 
			
		||||
their model to be re-used in other programs, such as efficiency-oriented C++ programs.
 | 
			
		||||
 | 
			
		||||
We have provided an interface that allows the export of `transformers` models to TorchScript so that they can
 | 
			
		||||
be reused in a different environment than a Pytorch-based python program. Here we explain how to use our models so that
 | 
			
		||||
they can be exported, and what to be mindful of when using these models with TorchScript.
 | 
			
		||||
 | 
			
		||||
Exporting a model needs two things:
 | 
			
		||||
 | 
			
		||||
* dummy inputs to execute a model forward pass.
 | 
			
		||||
* a model instantiated with the ``torchscript`` flag.
 | 
			
		||||
 | 
			
		||||
These necessities imply several things developers should be careful about. These are detailed below.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Implications
 | 
			
		||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 | 
			
		||||
 | 
			
		||||
TorchScript flag and tied weights
 | 
			
		||||
------------------------------------------------
 | 
			
		||||
This flag is necessary because most of the language models in this repository have tied weights between their
 | 
			
		||||
``Embedding`` layer and their ``Decoding`` layer. TorchScript does not allow the export of models that have tied weights,
 | 
			
		||||
so it is necessary to untie the weights beforehand.
 | 
			
		||||
 | 
			
		||||
This implies that models instantiated with the ``torchscript`` flag have their ``Embedding`` layer and ``Decoding`` layer
 | 
			
		||||
separate, which means that they should not be trained down the line. Training would de-synchronize the two layers,
 | 
			
		||||
leading to unexpected results.
 | 
			
		||||
 | 
			
		||||
This is not the case for models that do not have a Language Model head, as those do not have tied weights. These models
 | 
			
		||||
can be safely exported without the ``torchscript`` flag.
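
As a quick sanity check, a model loaded with the ``torchscript`` flag no longer shares storage between its input embeddings and its LM head. The sketch below illustrates this; the attribute paths are internal to the BERT implementation and may change between versions:

.. code-block:: python

    from transformers import BertForMaskedLM

    model = BertForMaskedLM.from_pretrained("bert-base-uncased", torchscript=True)

    # With torchscript=True the output embeddings are cloned instead of tied,
    # so the two weight tensors live in different storage
    input_embeddings = model.bert.embeddings.word_embeddings.weight
    output_embeddings = model.cls.predictions.decoder.weight
    assert input_embeddings.data_ptr() != output_embeddings.data_ptr()
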
 | 
			
		||||
 | 
			
		||||
Dummy inputs and standard lengths
 | 
			
		||||
------------------------------------------------
 | 
			
		||||
 | 
			
		||||
The dummy inputs are used to do a model forward pass. While the inputs' values are propagating through the layers,
 | 
			
		||||
Pytorch keeps track of the different operations executed on each tensor. These recorded operations are then used
 | 
			
		||||
to create the "trace" of the model.
 | 
			
		||||
 | 
			
		||||
The trace is created relative to the inputs' dimensions. It is therefore constrained by the dimensions of the dummy
 | 
			
		||||
input, and will not work for any other sequence length or batch size. When trying with a different size, an error such
 | 
			
		||||
as:
 | 
			
		||||
 | 
			
		||||
``The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2``
 | 
			
		||||
 | 
			
		||||
will be raised. It is therefore recommended to trace the model with a dummy input size at least as large as the largest
 | 
			
		||||
input that will be fed to the model during inference. Padding can be performed to fill the missing values. As the model
 | 
			
		||||
will have been traced with a large input size, however, the dimensions of the different matrices will be large as well,
 | 
			
		||||
resulting in more calculations.
 | 
			
		||||
 | 
			
		||||
It is recommended to be careful of the total number of operations done on each input and to follow performance closely
 | 
			
		||||
when exporting varying sequence-length models.
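
For example, here is a minimal sketch of tracing with a padded dummy input (the ``max_length`` value and the use of ``0`` as the padding id are illustrative assumptions; use the padding token of your own tokenizer):

.. code-block:: python

    import torch
    from transformers import BertModel, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertModel.from_pretrained("bert-base-uncased", torchscript=True)
    model.eval()

    # Pick a length at least as large as the longest input expected at inference time
    max_length = 128  # illustrative assumption

    tokens = tokenizer.tokenize("[CLS] Who was Jim Henson ? [SEP]")
    token_ids = tokenizer.convert_tokens_to_ids(tokens)

    # Pad the dummy input so the trace is recorded with the largest expected dimensions
    padded_ids = token_ids + [0] * (max_length - len(token_ids))
    dummy_input = torch.tensor([padded_ids])

    traced_model = torch.jit.trace(model, dummy_input)
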
 | 
			
		||||
 | 
			
		||||
Using TorchScript in Python
 | 
			
		||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 | 
			
		||||
 | 
			
		||||
Below are examples of how to use Python to save and load models, as well as how to use the trace for inference.
 | 
			
		||||
 | 
			
		||||
Saving a model
 | 
			
		||||
------------------------------------------------
 | 
			
		||||
 | 
			
		||||
This snippet shows how to use TorchScript to export a ``BertModel``. Here the ``BertModel`` is instantiated
 | 
			
		||||
according to a ``BertConfig`` class and then saved to disk under the filename ``traced_bert.pt``.
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
    from transformers import BertModel, BertTokenizer, BertConfig
 | 
			
		||||
    import torch
 | 
			
		||||
 | 
			
		||||
    enc = BertTokenizer.from_pretrained("bert-base-uncased")
 | 
			
		||||
 | 
			
		||||
    # Tokenizing input text
 | 
			
		||||
    text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
 | 
			
		||||
    tokenized_text = enc.tokenize(text)
 | 
			
		||||
 | 
			
		||||
    # Masking one of the input tokens
 | 
			
		||||
    masked_index = 8
 | 
			
		||||
    tokenized_text[masked_index] = '[MASK]'
 | 
			
		||||
    indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
 | 
			
		||||
    segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
 | 
			
		||||
 | 
			
		||||
    # Creating a dummy input
 | 
			
		||||
    tokens_tensor = torch.tensor([indexed_tokens])
 | 
			
		||||
    segments_tensors = torch.tensor([segments_ids])
 | 
			
		||||
    dummy_input = [tokens_tensor, segments_tensors]
 | 
			
		||||
 | 
			
		||||
    # Initializing the model with the torchscript flag
 | 
			
		||||
    # Flag set to True even though it is not necessary as this model does not have an LM Head.
 | 
			
		||||
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
 | 
			
		||||
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, torchscript=True)
 | 
			
		||||
 | 
			
		||||
    # Instantiating the model
 | 
			
		||||
    model = BertModel(config)
 | 
			
		||||
 | 
			
		||||
    # The model needs to be in evaluation mode
 | 
			
		||||
    model.eval()
 | 
			
		||||
 | 
			
		||||
    # If you are instantiating the model with `from_pretrained` you can also easily set the TorchScript flag
 | 
			
		||||
    model = BertModel.from_pretrained("bert-base-uncased", torchscript=True)
 | 
			
		||||
 | 
			
		||||
    # Creating the trace
 | 
			
		||||
    traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
 | 
			
		||||
    torch.jit.save(traced_model, "traced_bert.pt")
 | 
			
		||||
 | 
			
		||||
Loading a model
 | 
			
		||||
------------------------------------------------
 | 
			
		||||
 | 
			
		||||
This snippet shows how to load the ``BertModel`` that was previously saved to disk under the name ``traced_bert.pt``.
 | 
			
		||||
We are re-using the previously initialised ``dummy_input``.
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
    loaded_model = torch.jit.load("traced_bert.pt")
 | 
			
		||||
    loaded_model.eval()
 | 
			
		||||
 | 
			
		||||
    all_encoder_layers, pooled_output = loaded_model(dummy_input)
 | 
			
		||||
 | 
			
		||||
Using a traced model for inference
 | 
			
		||||
------------------------------------------------
 | 
			
		||||
 | 
			
		||||
Using the traced model for inference is as simple as using its ``__call__`` dunder method:
 | 
			
		||||
 | 
			
		||||
.. code-block:: python
 | 
			
		||||
 | 
			
		||||
    traced_model(tokens_tensor, segments_tensors)
 | 
			
		||||
							
								
								
									
examples/README.md (new file)
@ -0,0 +1,392 @@
 | 
			
		||||
# Examples
 | 
			
		||||
 | 
			
		||||
In this section a few examples are put together. All of these examples work for several models, making use of the very
 | 
			
		||||
similar API between the different models.
 | 
			
		||||
 | 
			
		||||
| Section                    | Description                                                                                                                                                |
 | 
			
		||||
|----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | 
			
		||||
| [Language Model fine-tuning](#language-model-fine-tuning) | Fine-tuning the library models for language modeling on a text dataset. Causal language modeling for GPT/GPT-2, masked language modeling for BERT/RoBERTa. |
 | 
			
		||||
| [Language Generation](#language-generation) | Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL and XLNet.                                         |
 | 
			
		||||
| [GLUE](#glue) | Examples running BERT/XLM/XLNet/RoBERTa on the 9 GLUE tasks. Examples feature distributed training as well as half-precision.                              |
 | 
			
		||||
| [SQuAD](#squad) | Using BERT for question answering, examples with distributed training.                                                                                  |
 | 
			
		||||
| [Multiple Choice](#multiple-choice) | Examples running BERT/XLNet/RoBERTa on the SWAG/RACE/ARC tasks.                                                                            |
 | 
			
		||||
 | 
			
		||||
## Language model fine-tuning
 | 
			
		||||
 | 
			
		||||
Based on the script [`run_lm_finetuning.py`](https://github.com/huggingface/transformers/blob/master/examples/run_lm_finetuning.py).
 | 
			
		||||
 | 
			
		||||
Fine-tuning the library models for language modeling on a text dataset for GPT, GPT-2, BERT and RoBERTa (DistilBERT 
 | 
			
		||||
to be added soon). GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa 
 | 
			
		||||
are fine-tuned using a masked language modeling (MLM) loss.
 | 
			
		||||
 | 
			
		||||
Before running the following example, you should get a file that contains text on which the language model will be
 | 
			
		||||
fine-tuned. A good example of such text is the [WikiText-2 dataset](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/).
 | 
			
		||||
 | 
			
		||||
We will refer to two different files: `$TRAIN_FILE`, which contains text for training, and `$TEST_FILE`, which contains
 | 
			
		||||
text that will be used for evaluation.
 | 
			
		||||
 | 
			
		||||
### GPT-2/GPT and causal language modeling
 | 
			
		||||
 | 
			
		||||
The following example fine-tunes GPT-2 on WikiText-2. We're using the raw WikiText-2 (no tokens were replaced before
 | 
			
		||||
the tokenization). The loss here is that of causal language modeling.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export TRAIN_FILE=/path/to/dataset/wiki.train.raw
 | 
			
		||||
export TEST_FILE=/path/to/dataset/wiki.test.raw
 | 
			
		||||
 | 
			
		||||
python run_lm_finetuning.py \
 | 
			
		||||
    --output_dir=output \
 | 
			
		||||
    --model_type=gpt2 \
 | 
			
		||||
    --model_name_or_path=gpt2 \
 | 
			
		||||
    --do_train \
 | 
			
		||||
    --train_data_file=$TRAIN_FILE \
 | 
			
		||||
    --do_eval \
 | 
			
		||||
    --eval_data_file=$TEST_FILE
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
This takes about half an hour to train on a single K80 GPU and about one minute for the evaluation to run. It reaches
 | 
			
		||||
a score of ~20 perplexity once fine-tuned on the dataset.
 | 
			
		||||
 | 
			
		||||
### RoBERTa/BERT and masked language modeling
 | 
			
		||||
 | 
			
		||||
The following example fine-tunes RoBERTa on WikiText-2. Here too, we're using the raw WikiText-2. The loss is different
 | 
			
		||||
as BERT/RoBERTa have a bidirectional mechanism; we're therefore using the same loss that was used during their
 | 
			
		||||
pre-training: masked language modeling. 
 | 
			
		||||
 | 
			
		||||
In accordance with the RoBERTa paper, we use dynamic masking rather than static masking. The model may, therefore, converge
 | 
			
		||||
slightly more slowly (over-fitting takes more epochs).
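
As an illustration, here is a minimal sketch of what dynamic masking means (a simplification, not the exact logic of `run_lm_finetuning.py`; the 15% masking probability follows the BERT/RoBERTa papers and the ignore index should match what your loss function expects):

```python
import torch

def dynamic_mask(input_ids, mask_token_id, mlm_probability=0.15):
    """Mask a fresh random 15% of the tokens every time a batch is built."""
    labels = input_ids.clone()
    # A new mask is sampled on every call, so each epoch sees different masked positions
    mask = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~mask] = -1  # un-masked positions are ignored by the loss (adjust the ignore index if needed)
    masked_inputs = input_ids.clone()
    masked_inputs[mask] = mask_token_id
    return masked_inputs, labels
```

The key point is that the mask is re-drawn for every batch rather than fixed once during preprocessing, which is why convergence can take a few more epochs.
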
 | 
			
		||||
 | 
			
		||||
We use the `--mlm` flag so that the script switches to the masked language modeling loss.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export TRAIN_FILE=/path/to/dataset/wiki.train.raw
 | 
			
		||||
export TEST_FILE=/path/to/dataset/wiki.test.raw
 | 
			
		||||
 | 
			
		||||
python run_lm_finetuning.py \
 | 
			
		||||
    --output_dir=output \
 | 
			
		||||
    --model_type=roberta \
 | 
			
		||||
    --model_name_or_path=roberta-base \
 | 
			
		||||
    --do_train \
 | 
			
		||||
    --train_data_file=$TRAIN_FILE \
 | 
			
		||||
    --do_eval \
 | 
			
		||||
    --eval_data_file=$TEST_FILE \
 | 
			
		||||
    --mlm
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Language generation
 | 
			
		||||
 | 
			
		||||
Based on the script [`run_generation.py`](https://github.com/huggingface/transformers/blob/master/examples/run_generation.py).
 | 
			
		||||
 | 
			
		||||
Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL and XLNet.
 | 
			
		||||
A similar script is used for our official demo [Write With Transformer](https://transformer.huggingface.co), where you
 | 
			
		||||
can try out the different models available in the library.
 | 
			
		||||
 | 
			
		||||
Example usage:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python run_generation.py \
 | 
			
		||||
    --model_type=gpt2 \
 | 
			
		||||
    --model_name_or_path=gpt2
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## GLUE
 | 
			
		||||
 | 
			
		||||
Based on the script [`run_glue.py`](https://github.com/huggingface/transformers/blob/master/examples/run_glue.py).
 | 
			
		||||
 | 
			
		||||
Fine-tuning the library models for sequence classification on the GLUE benchmark: [General Language Understanding 
 | 
			
		||||
Evaluation](https://gluebenchmark.com/). This script can fine-tune the following models: BERT, XLM, XLNet and RoBERTa. 
 | 
			
		||||
 | 
			
		||||
GLUE is made up of a total of 9 different tasks. We get the following results on the dev set of the benchmark with an
 | 
			
		||||
uncased BERT base model (the checkpoint `bert-base-uncased`). All experiments ran on 8 V100 GPUs with a total train
 | 
			
		||||
batch size of 24. Some of these tasks have a small dataset and training can lead to high variance in the results
 | 
			
		||||
between different runs. We report the median on 5 runs (with different seeds) for each of the metrics.
 | 
			
		||||
 | 
			
		||||
| Task  | Metric                       | Result      |
 | 
			
		||||
|-------|------------------------------|-------------|
 | 
			
		||||
| CoLA  | Matthew's corr               | 48.87       |
 | 
			
		||||
| SST-2 | Accuracy                     | 91.74       |
 | 
			
		||||
| MRPC  | F1/Accuracy                  | 90.70/86.27 |
 | 
			
		||||
| STS-B | Pearson/Spearman corr.       | 91.39/91.04 |
 | 
			
		||||
| QQP   | Accuracy/F1                  | 90.79/87.66 |
 | 
			
		||||
| MNLI  | Matched acc./Mismatched acc. | 83.70/84.83 |
 | 
			
		||||
| QNLI  | Accuracy                     | 89.31       |
 | 
			
		||||
| RTE   | Accuracy                     | 71.43       |
 | 
			
		||||
| WNLI  | Accuracy                     | 43.66       |
 | 
			
		||||
 | 
			
		||||
Some of these results are significantly different from the ones reported on the test set
 | 
			
		||||
of the GLUE benchmark on the website. For QQP and WNLI, please refer to [FAQ #12](https://gluebenchmark.com/faq) on the website.
 | 
			
		||||
 | 
			
		||||
Before running any one of these GLUE tasks you should download the
 | 
			
		||||
[GLUE data](https://gluebenchmark.com/tasks) by running
 | 
			
		||||
[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
 | 
			
		||||
and unpack it to some directory `$GLUE_DIR`.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export GLUE_DIR=/path/to/glue
 | 
			
		||||
export TASK_NAME=MRPC
 | 
			
		||||
 | 
			
		||||
python run_glue.py \
 | 
			
		||||
  --model_type bert \
 | 
			
		||||
  --model_name_or_path bert-base-cased \
 | 
			
		||||
  --task_name $TASK_NAME \
 | 
			
		||||
  --do_train \
 | 
			
		||||
  --do_eval \
 | 
			
		||||
  --do_lower_case \
 | 
			
		||||
  --data_dir $GLUE_DIR/$TASK_NAME \
 | 
			
		||||
  --max_seq_length 128 \
 | 
			
		||||
  --per_gpu_train_batch_size 32 \
 | 
			
		||||
  --learning_rate 2e-5 \
 | 
			
		||||
  --num_train_epochs 3.0 \
 | 
			
		||||
  --output_dir /tmp/$TASK_NAME/
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
where task name can be one of CoLA, SST-2, MRPC, STS-B, QQP, MNLI, QNLI, RTE, WNLI.
 | 
			
		||||
 | 
			
		||||
The dev set results will be present within the text file `eval_results.txt` in the specified output_dir. 
 | 
			
		||||
In the case of MNLI, since there are two separate dev sets (matched and mismatched), there will be a separate 
 | 
			
		||||
output folder called `/tmp/MNLI-MM/` in addition to `/tmp/MNLI/`.
 | 
			
		||||
 | 
			
		||||
The code has not been tested with half-precision training with apex on any GLUE task apart from MRPC, MNLI, 
 | 
			
		||||
CoLA, SST-2. The following section provides details on how to run half-precision training with MRPC. With that being 
 | 
			
		||||
said, there shouldn’t be any issues in running half-precision training with the remaining GLUE tasks as well, 
 | 
			
		||||
since the data processor for each task inherits from the base class DataProcessor.
 | 
			
		||||
 | 
			
		||||
### MRPC
 | 
			
		||||
 | 
			
		||||
#### Fine-tuning example
 | 
			
		||||
 | 
			
		||||
The following example fine-tunes BERT on the Microsoft Research Paraphrase Corpus (MRPC) and runs in less 
 | 
			
		||||
than 10 minutes on a single K80 and in 27 seconds (!) on a single Tesla V100 16GB with apex installed.
 | 
			
		||||
 | 
			
		||||
Before running any one of these GLUE tasks you should download the
 | 
			
		||||
[GLUE data](https://gluebenchmark.com/tasks) by running
 | 
			
		||||
[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
 | 
			
		||||
and unpack it to some directory `$GLUE_DIR`.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export GLUE_DIR=/path/to/glue
 | 
			
		||||
 | 
			
		||||
python run_glue.py \
 | 
			
		||||
  --model_type bert \
 | 
			
		||||
  --model_name_or_path bert-base-cased \
 | 
			
		||||
  --task_name MRPC \
 | 
			
		||||
  --do_train \
 | 
			
		||||
  --do_eval \
 | 
			
		||||
  --do_lower_case \
 | 
			
		||||
  --data_dir $GLUE_DIR/MRPC/ \
 | 
			
		||||
  --max_seq_length 128 \
 | 
			
		||||
  --per_gpu_train_batch_size 32 \
 | 
			
		||||
  --learning_rate 2e-5 \
 | 
			
		||||
  --num_train_epochs 3.0 \
 | 
			
		||||
  --output_dir /tmp/mrpc_output/
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Our test, run on a few seeds with [the original implementation hyper-
 | 
			
		||||
parameters](https://github.com/google-research/bert#sentence-and-sentence-pair-classification-tasks), gave evaluation 
 | 
			
		||||
results between 84% and 88%.
 | 
			
		||||
 | 
			
		||||
#### Using Apex and mixed-precision
 | 
			
		||||
 | 
			
		||||
Using Apex and 16-bit precision, fine-tuning on MRPC only takes 27 seconds. First install 
 | 
			
		||||
[apex](https://github.com/NVIDIA/apex), then run the following example:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export GLUE_DIR=/path/to/glue
 | 
			
		||||
 | 
			
		||||
python run_glue.py \
 | 
			
		||||
  --model_type bert \
 | 
			
		||||
  --model_name_or_path bert-base-cased \
 | 
			
		||||
  --task_name MRPC \
 | 
			
		||||
  --do_train \
 | 
			
		||||
  --do_eval \
 | 
			
		||||
  --do_lower_case \
 | 
			
		||||
  --data_dir $GLUE_DIR/MRPC/ \
 | 
			
		||||
  --max_seq_length 128 \
 | 
			
		||||
  --per_gpu_train_batch_size 32 \
 | 
			
		||||
  --learning_rate 2e-5 \
 | 
			
		||||
  --num_train_epochs 3.0 \
 | 
			
		||||
  --output_dir /tmp/mrpc_output/ \
 | 
			
		||||
  --fp16
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
#### Distributed training
 | 
			
		||||
 | 
			
		||||
Here is an example using distributed training on 8 V100 GPUs. The model used is the BERT whole-word-masking and it
 | 
			
		||||
reaches F1 > 92 on MRPC.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export GLUE_DIR=/path/to/glue
 | 
			
		||||
 | 
			
		||||
python -m torch.distributed.launch \
 | 
			
		||||
    --nproc_per_node 8 run_glue.py \
 | 
			
		||||
    --model_type bert \
 | 
			
		||||
    --model_name_or_path bert-base-cased \
 | 
			
		||||
    --task_name MRPC \
 | 
			
		||||
    --do_train \
 | 
			
		||||
    --do_eval \
 | 
			
		||||
    --do_lower_case \
 | 
			
		||||
    --data_dir $GLUE_DIR/MRPC/ \
 | 
			
		||||
    --max_seq_length 128 \
 | 
			
		||||
    --per_gpu_train_batch_size 8 \
 | 
			
		||||
    --learning_rate 2e-5 \
 | 
			
		||||
    --num_train_epochs 3.0 \
 | 
			
		||||
    --output_dir /tmp/mrpc_output/
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Training with these hyper-parameters gave us the following results:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
acc = 0.8823529411764706
 | 
			
		||||
acc_and_f1 = 0.901702786377709
 | 
			
		||||
eval_loss = 0.3418912578906332
 | 
			
		||||
f1 = 0.9210526315789473
 | 
			
		||||
global_step = 174
 | 
			
		||||
loss = 0.07231863956341798
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### MNLI
 | 
			
		||||
 | 
			
		||||
The following example uses the BERT-large, uncased, whole-word-masking model and fine-tunes it on the MNLI task.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export GLUE_DIR=/path/to/glue
 | 
			
		||||
 | 
			
		||||
python -m torch.distributed.launch \
 | 
			
		||||
    --nproc_per_node 8 run_glue.py \
 | 
			
		||||
    --model_type bert \
 | 
			
		||||
    --model_name_or_path bert-base-cased \
 | 
			
		||||
    --task_name mnli \
 | 
			
		||||
    --do_train \
 | 
			
		||||
    --do_eval \
 | 
			
		||||
    --do_lower_case \
 | 
			
		||||
    --data_dir $GLUE_DIR/MNLI/ \
 | 
			
		||||
    --max_seq_length 128 \
 | 
			
		||||
    --per_gpu_train_batch_size 8 \
 | 
			
		||||
    --learning_rate 2e-5 \
 | 
			
		||||
    --num_train_epochs 3.0 \
 | 
			
		||||
    --output_dir output_dir
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The results are the following:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
***** Eval results *****
 | 
			
		||||
  acc = 0.8679706601466992
 | 
			
		||||
  eval_loss = 0.4911287787382479
 | 
			
		||||
  global_step = 18408
 | 
			
		||||
  loss = 0.04755385363816904
 | 
			
		||||
 | 
			
		||||
***** Eval results *****
 | 
			
		||||
  acc = 0.8747965825874695
 | 
			
		||||
  eval_loss = 0.45516540421714036
 | 
			
		||||
  global_step = 18408
 | 
			
		||||
  loss = 0.04755385363816904
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Multiple Choice
 | 
			
		||||
 | 
			
		||||
Based on the script [`run_multiple_choice.py`]().
 | 
			
		||||
 | 
			
		||||
#### Fine-tuning on SWAG
 | 
			
		||||
Download the [swag](https://github.com/rowanz/swagaf/tree/master/data) data.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
# Training on 4 Tesla V100 (16GB) GPUs
 | 
			
		||||
export SWAG_DIR=/path/to/swag_data_dir
 | 
			
		||||
python ./examples/single_model_scripts/run_multiple_choice.py \
 | 
			
		||||
--model_type roberta \
 | 
			
		||||
--task_name swag \
 | 
			
		||||
--model_name_or_path roberta-base \
 | 
			
		||||
--do_train \
 | 
			
		||||
--do_eval \
 | 
			
		||||
--do_lower_case \
 | 
			
		||||
--data_dir $SWAG_DIR \
 | 
			
		||||
--learning_rate 5e-5 \
 | 
			
		||||
--num_train_epochs 3 \
 | 
			
		||||
--max_seq_length 80 \
 | 
			
		||||
--output_dir models_bert/swag_base \
 | 
			
		||||
--per_gpu_eval_batch_size=16 \
 | 
			
		||||
--per_gpu_train_batch_size=16 \
 | 
			
		||||
--gradient_accumulation_steps 2 \
 | 
			
		||||
--overwrite_output
 | 
			
		||||
```
 | 
			
		||||
Training with the defined hyper-parameters yields the following results:
 | 
			
		||||
```
 | 
			
		||||
***** Eval results *****
 | 
			
		||||
eval_acc = 0.8338998300509847
 | 
			
		||||
eval_loss = 0.44457291918821606
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## SQuAD
 | 
			
		||||
 | 
			
		||||
Based on the script [`run_squad.py`](https://github.com/huggingface/transformers/blob/master/examples/run_squad.py).
 | 
			
		||||
 | 
			
		||||
#### Fine-tuning on SQuAD
 | 
			
		||||
 | 
			
		||||
This example code fine-tunes BERT on the SQuAD dataset. It runs in 24 min (with BERT-base) or 68 min (with BERT-large) 
 | 
			
		||||
on a single Tesla V100 16GB. The data for SQuAD can be downloaded with the following links and should be saved in a 
 | 
			
		||||
`$SQUAD_DIR` directory.
 | 
			
		||||
 | 
			
		||||
* [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json)
 | 
			
		||||
* [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json)
 | 
			
		||||
* [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py)
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export SQUAD_DIR=/path/to/SQUAD
 | 
			
		||||
 | 
			
		||||
python run_squad.py \
 | 
			
		||||
  --model_type bert \
 | 
			
		||||
  --model_name_or_path bert-base-cased \
 | 
			
		||||
  --do_train \
 | 
			
		||||
  --do_eval \
 | 
			
		||||
  --do_lower_case \
 | 
			
		||||
  --train_file $SQUAD_DIR/train-v1.1.json \
 | 
			
		||||
  --predict_file $SQUAD_DIR/dev-v1.1.json \
 | 
			
		||||
  --per_gpu_train_batch_size 12 \
 | 
			
		||||
  --learning_rate 3e-5 \
 | 
			
		||||
  --num_train_epochs 2.0 \
 | 
			
		||||
  --max_seq_length 384 \
 | 
			
		||||
  --doc_stride 128 \
 | 
			
		||||
  --output_dir /tmp/debug_squad/
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Training with the previously defined hyper-parameters yields the following results:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
f1 = 88.52
 | 
			
		||||
exact_match = 81.22
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
#### Distributed training
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Here is an example using distributed training on 8 V100 GPUs and the BERT whole-word-masking uncased model to reach an F1 > 93 on SQuAD:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python -m torch.distributed.launch --nproc_per_node=8 run_squad.py \
 | 
			
		||||
    --model_type bert \
 | 
			
		||||
    --model_name_or_path bert-base-cased \
 | 
			
		||||
    --do_train \
 | 
			
		||||
    --do_eval \
 | 
			
		||||
    --do_lower_case \
 | 
			
		||||
    --train_file $SQUAD_DIR/train-v1.1.json \
 | 
			
		||||
    --predict_file $SQUAD_DIR/dev-v1.1.json \
 | 
			
		||||
    --learning_rate 3e-5 \
 | 
			
		||||
    --num_train_epochs 2 \
 | 
			
		||||
    --max_seq_length 384 \
 | 
			
		||||
    --doc_stride 128 \
 | 
			
		||||
    --output_dir ../models/wwm_uncased_finetuned_squad/ \
 | 
			
		||||
    --per_gpu_train_batch_size 24 \
 | 
			
		||||
    --gradient_accumulation_steps 12
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Training with the previously defined hyper-parameters yields the following results:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
f1 = 93.15
 | 
			
		||||
exact_match = 86.91
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
This fine-tuned model is available as a checkpoint under the reference
 | 
			
		||||
`bert-large-uncased-whole-word-masking-finetuned-squad`.
 | 
			
		||||
 | 
			
		||||
							
								
								
									
examples/contrib/README.md (new file)
@ -0,0 +1,5 @@
 | 
			
		||||
# Community contributed examples
 | 
			
		||||
 | 
			
		||||
This folder contains examples which are not actively maintained (mostly contributed by the community).
 | 
			
		||||
 | 
			
		||||
Using these examples together with a recent version of the library usually requires making small (sometimes large) adaptations to get the scripts working.
 | 
			
		||||
@ -39,8 +39,9 @@ import torch
 | 
			
		||||
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
 | 
			
		||||
                              TensorDataset)
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
 | 
			
		||||
                                     OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)
 | 
			
		||||
from transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
 | 
			
		||||
                                     AdamW, cached_path, WEIGHTS_NAME, CONFIG_NAME,
 | 
			
		||||
                                     WarmupLinearSchedule)
 | 
			
		||||
 | 
			
		||||
ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
 | 
			
		||||
 | 
			
		||||
@ -83,8 +84,8 @@ def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, d
 | 
			
		||||
            input_ids[i, 1, :len(with_cont2)] = with_cont2
 | 
			
		||||
            mc_token_ids[i, 0] = len(with_cont1) - 1
 | 
			
		||||
            mc_token_ids[i, 1] = len(with_cont2) - 1
 | 
			
		||||
            lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]
 | 
			
		||||
            lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
 | 
			
		||||
            lm_labels[i, 0, :len(with_cont1)] = with_cont1
 | 
			
		||||
            lm_labels[i, 1, :len(with_cont2)] = with_cont2
 | 
			
		||||
            mc_labels[i] = mc_label
 | 
			
		||||
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
 | 
			
		||||
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
 | 
			
		||||
@ -104,9 +105,18 @@ def main():
 | 
			
		||||
    parser.add_argument('--num_train_epochs', type=int, default=3)
 | 
			
		||||
    parser.add_argument('--train_batch_size', type=int, default=8)
 | 
			
		||||
    parser.add_argument('--eval_batch_size', type=int, default=16)
 | 
			
		||||
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
 | 
			
		||||
                        help="Epsilon for Adam optimizer.")
 | 
			
		||||
    parser.add_argument('--max_grad_norm', type=int, default=1)
 | 
			
		||||
    parser.add_argument("--max_steps", default=-1, type=int,
 | 
			
		||||
                        help="If > 0: set total number of training \
 | 
			
		||||
                        steps to perform. Override num_train_epochs.")
 | 
			
		||||
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
 | 
			
		||||
                        help="Number of updates steps to accumulate before\
 | 
			
		||||
                        performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
 | 
			
		||||
    parser.add_argument('--warmup_proportion', type=float, default=0.002)
 | 
			
		||||
    parser.add_argument("--warmup_steps", default=0, type=int,
 | 
			
		||||
                        help="Linear warmup over warmup_steps.")
 | 
			
		||||
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
 | 
			
		||||
    parser.add_argument('--weight_decay', type=float, default=0.01)
 | 
			
		||||
    parser.add_argument('--lm_coef', type=float, default=0.9)
 | 
			
		||||
@ -143,9 +153,11 @@ def main():
 | 
			
		||||
    # This loading functions also add new tokens and embeddings called `special tokens`
 | 
			
		||||
    # These new embeddings will be fine-tuned on the RocStories dataset
 | 
			
		||||
    special_tokens = ['_start_', '_delimiter_', '_classify_']
 | 
			
		||||
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
 | 
			
		||||
    special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
 | 
			
		||||
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
 | 
			
		||||
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
 | 
			
		||||
    tokenizer.add_tokens(special_tokens)
 | 
			
		||||
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
 | 
			
		||||
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
 | 
			
		||||
    model.resize_token_embeddings(len(tokenizer))
 | 
			
		||||
    model.to(device)
 | 
			
		||||
 | 
			
		||||
    # Load and encode the datasets
 | 
			
		||||
@ -183,19 +195,23 @@ def main():
 | 
			
		||||
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer
 | 
			
		||||
    param_optimizer = list(model.named_parameters())
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
    num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
 | 
			
		||||
    optimizer = OpenAIAdam(optimizer_grouped_parameters,
 | 
			
		||||
                           lr=args.learning_rate,
 | 
			
		||||
                           warmup=args.warmup_proportion,
 | 
			
		||||
                           max_grad_norm=args.max_grad_norm,
 | 
			
		||||
                           weight_decay=args.weight_decay,
 | 
			
		||||
                           t_total=num_train_optimization_steps)
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        if args.max_steps > 0:
 | 
			
		||||
            t_total = args.max_steps
 | 
			
		||||
            args.num_train_epochs = args.max_steps //\
 | 
			
		||||
                (len(train_dataloader) // args.gradient_accumulation_steps) + 1
 | 
			
		||||
        else:
 | 
			
		||||
            t_total = len(train_dataloader)\
 | 
			
		||||
                // args.gradient_accumulation_steps * args.num_train_epochs
 | 
			
		||||
 | 
			
		||||
        param_optimizer = list(model.named_parameters())
 | 
			
		||||
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
 | 
			
		||||
        optimizer_grouped_parameters = [
 | 
			
		||||
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
 | 
			
		||||
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
            ]
 | 
			
		||||
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
 | 
			
		||||
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
 | 
			
		||||
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
 | 
			
		||||
@ -207,15 +223,16 @@ def main():
 | 
			
		||||
            for step, batch in enumerate(tqdm_bar):
 | 
			
		||||
                batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
 | 
			
		||||
                losses = model(input_ids, mc_token_ids, lm_labels, mc_labels)
 | 
			
		||||
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
 | 
			
		||||
                loss = args.lm_coef * losses[0] + losses[1]
 | 
			
		||||
                loss.backward()
 | 
			
		||||
                scheduler.step()
 | 
			
		||||
                optimizer.step()
 | 
			
		||||
                optimizer.zero_grad()
 | 
			
		||||
                tr_loss += loss.item()
 | 
			
		||||
                exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
 | 
			
		||||
                nb_tr_steps += 1
 | 
			
		||||
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])
 | 
			
		||||
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
 | 
			
		||||
 | 
			
		||||
    # Save a trained model
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
@ -243,8 +260,7 @@ def main():
 | 
			
		||||
            batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
 | 
			
		||||
            with torch.no_grad():
 | 
			
		||||
                _, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
 | 
			
		||||
                _, mc_logits = model(input_ids, mc_token_ids)
 | 
			
		||||
               _, mc_loss, _, mc_logits = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
 | 
			
		||||
 | 
			
		||||
            mc_logits = mc_logits.detach().cpu().numpy()
 | 
			
		||||
            mc_labels = mc_labels.to('cpu').numpy()
 | 
			
		||||
							
								
								
									
examples/contrib/run_swag.py (new file)
@ -0,0 +1,673 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""BERT finetuning runner.
 | 
			
		||||
   Finetuning the library models for multiple choice on SWAG (Bert).
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
import csv
 | 
			
		||||
import os
 | 
			
		||||
import random
 | 
			
		||||
import sys
 | 
			
		||||
import glob
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
 | 
			
		||||
                              TensorDataset)
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
 | 
			
		||||
from tensorboardX import SummaryWriter
 | 
			
		||||
 | 
			
		||||
from transformers import (WEIGHTS_NAME, BertConfig,
 | 
			
		||||
                                  BertForMultipleChoice, BertTokenizer)
 | 
			
		||||
 | 
			
		||||
from transformers import AdamW, WarmupLinearSchedule
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \
 | 
			
		||||
                  for conf in [BertConfig]), ())
 | 
			
		||||
 | 
			
		||||
MODEL_CLASSES = {
 | 
			
		||||
    'bert': (BertConfig, BertForMultipleChoice, BertTokenizer),
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
class SwagExample(object):
 | 
			
		||||
    """A single training/test example for the SWAG dataset."""
 | 
			
		||||
    def __init__(self,
 | 
			
		||||
                 swag_id,
 | 
			
		||||
                 context_sentence,
 | 
			
		||||
                 start_ending,
 | 
			
		||||
                 ending_0,
 | 
			
		||||
                 ending_1,
 | 
			
		||||
                 ending_2,
 | 
			
		||||
                 ending_3,
 | 
			
		||||
                 label = None):
 | 
			
		||||
        self.swag_id = swag_id
 | 
			
		||||
        self.context_sentence = context_sentence
 | 
			
		||||
        self.start_ending = start_ending
 | 
			
		||||
        self.endings = [
 | 
			
		||||
            ending_0,
 | 
			
		||||
            ending_1,
 | 
			
		||||
            ending_2,
 | 
			
		||||
            ending_3,
 | 
			
		||||
        ]
 | 
			
		||||
        self.label = label
 | 
			
		||||
 | 
			
		||||
    def __str__(self):
 | 
			
		||||
        return self.__repr__()
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        l = [
 | 
			
		||||
            "swag_id: {}".format(self.swag_id),
 | 
			
		||||
            "context_sentence: {}".format(self.context_sentence),
 | 
			
		||||
            "start_ending: {}".format(self.start_ending),
 | 
			
		||||
            "ending_0: {}".format(self.endings[0]),
 | 
			
		||||
            "ending_1: {}".format(self.endings[1]),
 | 
			
		||||
            "ending_2: {}".format(self.endings[2]),
 | 
			
		||||
            "ending_3: {}".format(self.endings[3]),
 | 
			
		||||
        ]
 | 
			
		||||
 | 
			
		||||
        if self.label is not None:
 | 
			
		||||
            l.append("label: {}".format(self.label))
 | 
			
		||||
 | 
			
		||||
        return ", ".join(l)
 | 
			
		||||
 | 
			
		||||
class InputFeatures(object):
 | 
			
		||||
    def __init__(self,
 | 
			
		||||
                 example_id,
 | 
			
		||||
                 choices_features,
 | 
			
		||||
                 label
 | 
			
		||||
 | 
			
		||||
    ):
 | 
			
		||||
        self.example_id = example_id
 | 
			
		||||
        self.choices_features = [
 | 
			
		||||
            {
 | 
			
		||||
                'input_ids': input_ids,
 | 
			
		||||
                'input_mask': input_mask,
 | 
			
		||||
                'segment_ids': segment_ids
 | 
			
		||||
            }
 | 
			
		||||
            for _, input_ids, input_mask, segment_ids in choices_features
 | 
			
		||||
        ]
 | 
			
		||||
        self.label = label
 | 
			
		||||
 | 
			
		||||
def read_swag_examples(input_file, is_training=True):
 | 
			
		||||
    with open(input_file, 'r', encoding='utf-8') as f:
 | 
			
		||||
        reader = csv.reader(f)
 | 
			
		||||
        lines = []
 | 
			
		||||
        for line in reader:
 | 
			
		||||
            if sys.version_info[0] == 2:
 | 
			
		||||
                line = list(unicode(cell, 'utf-8') for cell in line)
 | 
			
		||||
            lines.append(line)
 | 
			
		||||
 | 
			
		||||
    if is_training and lines[0][-1] != 'label':
 | 
			
		||||
        raise ValueError(
 | 
			
		||||
            "For training, the input file must contain a label column."
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
    examples = [
 | 
			
		||||
        SwagExample(
 | 
			
		||||
            swag_id = line[2],
 | 
			
		||||
            context_sentence = line[4],
 | 
			
		||||
            start_ending = line[5], # in the swag dataset, the
 | 
			
		||||
                                         # common beginning of each
 | 
			
		||||
                                         # choice is stored in "sent2".
 | 
			
		||||
            ending_0 = line[7],
 | 
			
		||||
            ending_1 = line[8],
 | 
			
		||||
            ending_2 = line[9],
 | 
			
		||||
            ending_3 = line[10],
 | 
			
		||||
            label = int(line[11]) if is_training else None
 | 
			
		||||
        ) for line in lines[1:] # we skip the line with the column names
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
    return examples
 | 
			
		||||
 | 
			
		||||
def convert_examples_to_features(examples, tokenizer, max_seq_length,
 | 
			
		||||
                                 is_training):
 | 
			
		||||
    """Loads a data file into a list of `InputBatch`s."""
 | 
			
		||||
 | 
			
		||||
    # Swag is a multiple choice task. To perform this task using Bert,
 | 
			
		||||
    # we will use the formatting proposed in "Improving Language
 | 
			
		||||
    # Understanding by Generative Pre-Training" and suggested by
 | 
			
		||||
    # @jacobdevlin-google in this issue
 | 
			
		||||
    # https://github.com/google-research/bert/issues/38.
 | 
			
		||||
    #
 | 
			
		||||
    # Each choice will correspond to a sample on which we run the
 | 
			
		||||
    # inference. For a given Swag example, we will create the 4
 | 
			
		||||
    # following inputs:
 | 
			
		||||
    # - [CLS] context [SEP] choice_1 [SEP]
 | 
			
		||||
    # - [CLS] context [SEP] choice_2 [SEP]
 | 
			
		||||
    # - [CLS] context [SEP] choice_3 [SEP]
 | 
			
		||||
    # - [CLS] context [SEP] choice_4 [SEP]
 | 
			
		||||
    # The model will output a single value for each input. To get the
 | 
			
		||||
    # final decision of the model, we will run a softmax over these 4
 | 
			
		||||
    # outputs.
 | 
			
		||||
    features = []
 | 
			
		||||
    for example_index, example in tqdm(enumerate(examples)):
 | 
			
		||||
        context_tokens = tokenizer.tokenize(example.context_sentence)
 | 
			
		||||
        start_ending_tokens = tokenizer.tokenize(example.start_ending)
 | 
			
		||||
 | 
			
		||||
        choices_features = []
 | 
			
		||||
        for ending_index, ending in enumerate(example.endings):
 | 
			
		||||
            # We create a copy of the context tokens in order to be
 | 
			
		||||
            # able to shrink it according to ending_tokens
 | 
			
		||||
            context_tokens_choice = context_tokens[:]
 | 
			
		||||
            ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
 | 
			
		||||
            # Modifies `context_tokens_choice` and `ending_tokens` in
 | 
			
		||||
            # place so that the total length is less than the
 | 
			
		||||
            # specified length.  Account for [CLS], [SEP], [SEP] with
 | 
			
		||||
            # "- 3"
 | 
			
		||||
            _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)
 | 
			
		||||
 | 
			
		||||
            tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
 | 
			
		||||
            segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
 | 
			
		||||
 | 
			
		||||
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
            input_mask = [1] * len(input_ids)
 | 
			
		||||
 | 
			
		||||
            # Zero-pad up to the sequence length.
 | 
			
		||||
            padding = [0] * (max_seq_length - len(input_ids))
 | 
			
		||||
            input_ids += padding
 | 
			
		||||
            input_mask += padding
 | 
			
		||||
            segment_ids += padding
 | 
			
		||||
 | 
			
		||||
            assert len(input_ids) == max_seq_length
 | 
			
		||||
            assert len(input_mask) == max_seq_length
 | 
			
		||||
            assert len(segment_ids) == max_seq_length
 | 
			
		||||
 | 
			
		||||
            choices_features.append((tokens, input_ids, input_mask, segment_ids))
 | 
			
		||||
 | 
			
		||||
        label = example.label
 | 
			
		||||
        if example_index < 5:
 | 
			
		||||
            logger.info("*** Example ***")
 | 
			
		||||
            logger.info("swag_id: {}".format(example.swag_id))
 | 
			
		||||
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
 | 
			
		||||
                logger.info("choice: {}".format(choice_idx))
 | 
			
		||||
                logger.info("tokens: {}".format(' '.join(tokens)))
 | 
			
		||||
                logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
 | 
			
		||||
                logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
 | 
			
		||||
                logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
 | 
			
		||||
            if is_training:
 | 
			
		||||
                logger.info("label: {}".format(label))
 | 
			
		||||
 | 
			
		||||
        features.append(
 | 
			
		||||
            InputFeatures(
 | 
			
		||||
                example_id = example.swag_id,
 | 
			
		||||
                choices_features = choices_features,
 | 
			
		||||
                label = label
 | 
			
		||||
            )
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
    return features
 | 
			
		||||
 | 
			
		||||
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
    """Truncates a sequence pair in place to the maximum length."""
 | 
			
		||||
 | 
			
		||||
    # This is a simple heuristic which will always truncate the longer sequence
 | 
			
		||||
    # one token at a time. This makes more sense than truncating an equal percent
 | 
			
		||||
    # of tokens from each, since if one sequence is very short then each token
 | 
			
		||||
    # that's truncated likely contains more information than a longer sequence.
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_length:
 | 
			
		||||
            break
 | 
			
		||||
        if len(tokens_a) > len(tokens_b):
 | 
			
		||||
            tokens_a.pop()
 | 
			
		||||
        else:
 | 
			
		||||
            tokens_b.pop()
 | 
			
		||||
 | 
			
		||||
def accuracy(out, labels):
 | 
			
		||||
    outputs = np.argmax(out, axis=1)
 | 
			
		||||
    return np.sum(outputs == labels)
 | 
			
		||||
 | 
			
		||||
def select_field(features, field):
 | 
			
		||||
    return [
 | 
			
		||||
        [
 | 
			
		||||
            choice[field]
 | 
			
		||||
            for choice in feature.choices_features
 | 
			
		||||
        ]
 | 
			
		||||
        for feature in features
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def set_seed(args):
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if args.n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
 | 
			
		||||
    if args.local_rank not in [-1, 0]:
 | 
			
		||||
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
 | 
			
		||||
 | 
			
		||||
    # Load data features from cache or dataset file
 | 
			
		||||
    input_file = args.predict_file if evaluate else args.train_file
 | 
			
		||||
    cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format(
 | 
			
		||||
        'dev' if evaluate else 'train',
 | 
			
		||||
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
 | 
			
		||||
        str(args.max_seq_length)))
 | 
			
		||||
    if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:
 | 
			
		||||
        logger.info("Loading features from cached file %s", cached_features_file)
 | 
			
		||||
        features = torch.load(cached_features_file)
 | 
			
		||||
    else:
 | 
			
		||||
        logger.info("Creating features from dataset file at %s", input_file)
 | 
			
		||||
        examples = read_swag_examples(input_file)
 | 
			
		||||
        features = convert_examples_to_features(
 | 
			
		||||
            examples, tokenizer, args.max_seq_length, not evaluate)
 | 
			
		||||
 | 
			
		||||
        if args.local_rank in [-1, 0]:
 | 
			
		||||
            logger.info("Saving features into cached file %s", cached_features_file)
 | 
			
		||||
            torch.save(features, cached_features_file)
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == 0:
 | 
			
		||||
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
 | 
			
		||||
 | 
			
		||||
    # Convert to Tensors and build dataset
 | 
			
		||||
    all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long)
 | 
			
		||||
    all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long)
 | 
			
		||||
    all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long)
 | 
			
		||||
    all_label = torch.tensor([f.label for f in features], dtype=torch.long)
 | 
			
		||||
 | 
			
		||||
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                            all_label)
 | 
			
		||||
 | 
			
		||||
    if output_examples:
 | 
			
		||||
        return dataset, examples, features
 | 
			
		||||
    return dataset
 | 
			
		||||
def train(args, train_dataset, model, tokenizer):
 | 
			
		||||
    """ Train the model """
 | 
			
		||||
    if args.local_rank in [-1, 0]:
 | 
			
		||||
        tb_writer = SummaryWriter()
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
 | 
			
		||||
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
 | 
			
		||||
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
 | 
			
		||||
 | 
			
		||||
    if args.max_steps > 0:
 | 
			
		||||
        t_total = args.max_steps
 | 
			
		||||
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
 | 
			
		||||
    else:
 | 
			
		||||
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer and schedule (linear warmup and decay)
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
 | 
			
		||||
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
 | 
			
		||||
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex import amp
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
 | 
			
		||||
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
 | 
			
		||||
 | 
			
		||||
    # multi-gpu training (should be after apex fp16 initialization)
 | 
			
		||||
    if args.n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Distributed training (should be after apex fp16 initialization)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
 | 
			
		||||
                                                          output_device=args.local_rank,
 | 
			
		||||
                                                          find_unused_parameters=True)
 | 
			
		||||
 | 
			
		||||
    # Train!
 | 
			
		||||
    logger.info("***** Running training *****")
 | 
			
		||||
    logger.info("  Num examples = %d", len(train_dataset))
 | 
			
		||||
    logger.info("  Num Epochs = %d", args.num_train_epochs)
 | 
			
		||||
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
 | 
			
		||||
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
 | 
			
		||||
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
 | 
			
		||||
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
 | 
			
		||||
    logger.info("  Total optimization steps = %d", t_total)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    tr_loss, logging_loss = 0.0, 0.0
 | 
			
		||||
    model.zero_grad()
 | 
			
		||||
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
 | 
			
		||||
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
 | 
			
		||||
    for _ in train_iterator:
 | 
			
		||||
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
 | 
			
		||||
        for step, batch in enumerate(epoch_iterator):
 | 
			
		||||
            model.train()
 | 
			
		||||
            batch = tuple(t.to(args.device) for t in batch)
 | 
			
		||||
            inputs = {'input_ids':       batch[0],
 | 
			
		||||
                      'attention_mask':  batch[1],
 | 
			
		||||
                      #'token_type_ids':  None if args.model_type == 'xlm' else batch[2],
 | 
			
		||||
                      'token_type_ids': batch[2],
 | 
			
		||||
                      'labels':         batch[3]}
 | 
			
		||||
            # if args.model_type in ['xlnet', 'xlm']:
 | 
			
		||||
            #     inputs.update({'cls_index': batch[5],
 | 
			
		||||
            #                    'p_mask':       batch[6]})
 | 
			
		||||
            outputs = model(**inputs)
 | 
			
		||||
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
 | 
			
		||||
 | 
			
		||||
            if args.n_gpu > 1:
 | 
			
		||||
                loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
 | 
			
		||||
            if args.gradient_accumulation_steps > 1:
 | 
			
		||||
                loss = loss / args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
            if args.fp16:
 | 
			
		||||
                with amp.scale_loss(loss, optimizer) as scaled_loss:
 | 
			
		||||
                    scaled_loss.backward()
 | 
			
		||||
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
 | 
			
		||||
            else:
 | 
			
		||||
                loss.backward()
 | 
			
		||||
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
 | 
			
		||||
 | 
			
		||||
            tr_loss += loss.item()
 | 
			
		||||
            if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                optimizer.step()
 | 
			
		||||
                scheduler.step()  # Update learning rate schedule
 | 
			
		||||
                model.zero_grad()
 | 
			
		||||
                global_step += 1
 | 
			
		||||
 | 
			
		||||
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
 | 
			
		||||
                    # Log metrics
 | 
			
		||||
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
 | 
			
		||||
                        results = evaluate(args, model, tokenizer)
 | 
			
		||||
                        for key, value in results.items():
 | 
			
		||||
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
 | 
			
		||||
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
 | 
			
		||||
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
 | 
			
		||||
                    logging_loss = tr_loss
 | 
			
		||||
 | 
			
		||||
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
 | 
			
		||||
                    # Save model checkpoint
 | 
			
		||||
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
 | 
			
		||||
                    if not os.path.exists(output_dir):
 | 
			
		||||
                        os.makedirs(output_dir)
 | 
			
		||||
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
 | 
			
		||||
                    model_to_save.save_pretrained(output_dir)
 | 
			
		||||
                    tokenizer.save_vocabulary(output_dir)
 | 
			
		||||
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
 | 
			
		||||
                    logger.info("Saving model checkpoint to %s", output_dir)
 | 
			
		||||
 | 
			
		||||
            if args.max_steps > 0 and global_step > args.max_steps:
 | 
			
		||||
                epoch_iterator.close()
 | 
			
		||||
                break
 | 
			
		||||
        if args.max_steps > 0 and global_step > args.max_steps:
 | 
			
		||||
            train_iterator.close()
 | 
			
		||||
            break
 | 
			
		||||
 | 
			
		||||
    if args.local_rank in [-1, 0]:
 | 
			
		||||
        tb_writer.close()
 | 
			
		||||
 | 
			
		||||
    return global_step, tr_loss / global_step
 | 
			
		||||
 | 
			
		||||
def evaluate(args, model, tokenizer, prefix=""):
 | 
			
		||||
    dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
 | 
			
		||||
    # Note that DistributedSampler samples randomly
 | 
			
		||||
    eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
 | 
			
		||||
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
 | 
			
		||||
 | 
			
		||||
    # Eval!
 | 
			
		||||
    logger.info("***** Running evaluation {} *****".format(prefix))
 | 
			
		||||
    logger.info("  Num examples = %d", len(dataset))
 | 
			
		||||
    logger.info("  Batch size = %d", args.eval_batch_size)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    eval_loss, eval_accuracy = 0, 0
 | 
			
		||||
    nb_eval_steps, nb_eval_examples = 0, 0
 | 
			
		||||
 | 
			
		||||
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
 | 
			
		||||
        model.eval()
 | 
			
		||||
        batch = tuple(t.to(args.device) for t in batch)
 | 
			
		||||
        with torch.no_grad():
 | 
			
		||||
            inputs = {'input_ids':      batch[0],
 | 
			
		||||
                      'attention_mask': batch[1],
 | 
			
		||||
                      # 'token_type_ids': None if args.model_type == 'xlm' else batch[2]  # XLM don't use segment_ids
 | 
			
		||||
                      'token_type_ids': batch[2],
 | 
			
		||||
                      'labels':         batch[3]}
 | 
			
		||||
 | 
			
		||||
            # if args.model_type in ['xlnet', 'xlm']:
 | 
			
		||||
            #     inputs.update({'cls_index': batch[4],
 | 
			
		||||
            #                    'p_mask':    batch[5]})
 | 
			
		||||
            outputs = model(**inputs)
 | 
			
		||||
            tmp_eval_loss, logits = outputs[:2]
 | 
			
		||||
            eval_loss += tmp_eval_loss.mean().item()
 | 
			
		||||
 | 
			
		||||
        logits = logits.detach().cpu().numpy()
 | 
			
		||||
        label_ids = inputs['labels'].to('cpu').numpy()
 | 
			
		||||
        tmp_eval_accuracy = accuracy(logits, label_ids)
 | 
			
		||||
        eval_accuracy += tmp_eval_accuracy
 | 
			
		||||
 | 
			
		||||
        nb_eval_steps += 1
 | 
			
		||||
        nb_eval_examples += inputs['input_ids'].size(0)
 | 
			
		||||
 | 
			
		||||
    eval_loss = eval_loss / nb_eval_steps
 | 
			
		||||
    eval_accuracy = eval_accuracy / nb_eval_examples
 | 
			
		||||
    result = {'eval_loss': eval_loss,
 | 
			
		||||
              'eval_accuracy': eval_accuracy}
 | 
			
		||||
 | 
			
		||||
    output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
 | 
			
		||||
    with open(output_eval_file, "w") as writer:
 | 
			
		||||
        logger.info("***** Eval results *****")
 | 
			
		||||
        for key in sorted(result.keys()):
 | 
			
		||||
            logger.info("%s = %s", key, str(result[key]))
 | 
			
		||||
            writer.write("%s = %s\n" % (key, str(result[key])))
 | 
			
		||||
 | 
			
		||||
    return result
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--train_file", default=None, type=str, required=True,
 | 
			
		||||
                        help="SWAG csv for training. E.g., train.csv")
 | 
			
		||||
    parser.add_argument("--predict_file", default=None, type=str, required=True,
 | 
			
		||||
                        help="SWAG csv for predictions. E.g., val.csv or test.csv")
 | 
			
		||||
    parser.add_argument("--model_type", default=None, type=str, required=True,
 | 
			
		||||
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
 | 
			
		||||
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
 | 
			
		||||
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
 | 
			
		||||
    parser.add_argument("--output_dir", default=None, type=str, required=True,
 | 
			
		||||
                        help="The output directory where the model checkpoints and predictions will be written.")
 | 
			
		||||
 | 
			
		||||
    ## Other parameters
 | 
			
		||||
    parser.add_argument("--config_name", default="", type=str,
 | 
			
		||||
                        help="Pretrained config name or path if not the same as model_name")
 | 
			
		||||
    parser.add_argument("--tokenizer_name", default="", type=str,
 | 
			
		||||
                        help="Pretrained tokenizer name or path if not the same as model_name")
 | 
			
		||||
    parser.add_argument("--max_seq_length", default=384, type=int,
 | 
			
		||||
                        help="The maximum total input sequence length after tokenization. Sequences "
 | 
			
		||||
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
 | 
			
		||||
    parser.add_argument("--do_train", action='store_true',
 | 
			
		||||
                        help="Whether to run training.")
 | 
			
		||||
    parser.add_argument("--do_eval", action='store_true',
 | 
			
		||||
                        help="Whether to run eval on the dev set.")
 | 
			
		||||
    parser.add_argument("--evaluate_during_training", action='store_true',
 | 
			
		||||
                        help="Rul evaluation during training at each logging step.")
 | 
			
		||||
    parser.add_argument("--do_lower_case", action='store_true',
 | 
			
		||||
                        help="Set this flag if you are using an uncased model.")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
 | 
			
		||||
                        help="Batch size per GPU/CPU for training.")
 | 
			
		||||
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
 | 
			
		||||
                        help="Batch size per GPU/CPU for evaluation.")
 | 
			
		||||
    parser.add_argument("--learning_rate", default=5e-5, type=float,
 | 
			
		||||
                        help="The initial learning rate for Adam.")
 | 
			
		||||
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
 | 
			
		||||
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument("--weight_decay", default=0.0, type=float,
 | 
			
		||||
                        help="Weight deay if we apply some.")
 | 
			
		||||
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
 | 
			
		||||
                        help="Epsilon for Adam optimizer.")
 | 
			
		||||
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
 | 
			
		||||
                        help="Max gradient norm.")
 | 
			
		||||
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
 | 
			
		||||
                        help="Total number of training epochs to perform.")
 | 
			
		||||
    parser.add_argument("--max_steps", default=-1, type=int,
 | 
			
		||||
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
 | 
			
		||||
    parser.add_argument("--warmup_steps", default=0, type=int,
 | 
			
		||||
                        help="Linear warmup over warmup_steps.")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument('--logging_steps', type=int, default=50,
 | 
			
		||||
                        help="Log every X updates steps.")
 | 
			
		||||
    parser.add_argument('--save_steps', type=int, default=50,
 | 
			
		||||
                        help="Save checkpoint every X updates steps.")
 | 
			
		||||
    parser.add_argument("--eval_all_checkpoints", action='store_true',
 | 
			
		||||
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
 | 
			
		||||
    parser.add_argument("--no_cuda", action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument('--overwrite_output_dir', action='store_true',
 | 
			
		||||
                        help="Overwrite the content of the output directory")
 | 
			
		||||
    parser.add_argument('--overwrite_cache', action='store_true',
 | 
			
		||||
                        help="Overwrite the cached training and evaluation sets")
 | 
			
		||||
    parser.add_argument('--seed', type=int, default=42,
 | 
			
		||||
                        help="random seed for initialization")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--local_rank", type=int, default=-1,
 | 
			
		||||
                        help="local_rank for distributed training on gpus")
 | 
			
		||||
    parser.add_argument('--fp16', action='store_true',
 | 
			
		||||
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
 | 
			
		||||
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
 | 
			
		||||
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
 | 
			
		||||
                             "See details at https://nvidia.github.io/apex/amp.html")
 | 
			
		||||
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
 | 
			
		||||
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
 | 
			
		||||
 | 
			
		||||
    # Setup distant debugging if needed
 | 
			
		||||
    if args.server_ip and args.server_port:
 | 
			
		||||
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
 | 
			
		||||
        import ptvsd
 | 
			
		||||
        print("Waiting for debugger attach")
 | 
			
		||||
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
 | 
			
		||||
        ptvsd.wait_for_attach()
 | 
			
		||||
 | 
			
		||||
    # Setup CUDA, GPU & distributed training
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        args.n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
 | 
			
		||||
        torch.cuda.set_device(args.local_rank)
 | 
			
		||||
        device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
        args.n_gpu = 1
 | 
			
		||||
    args.device = device
 | 
			
		||||
 | 
			
		||||
    # Setup logging
 | 
			
		||||
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                        datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
 | 
			
		||||
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
 | 
			
		||||
                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
 | 
			
		||||
 | 
			
		||||
    # Set seed
 | 
			
		||||
    set_seed(args)
 | 
			
		||||
 | 
			
		||||
    # Load pretrained model and tokenizer
 | 
			
		||||
    if args.local_rank not in [-1, 0]:
 | 
			
		||||
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
 | 
			
		||||
 | 
			
		||||
    args.model_type = args.model_type.lower()
 | 
			
		||||
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
 | 
			
		||||
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
 | 
			
		||||
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
 | 
			
		||||
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == 0:
 | 
			
		||||
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
 | 
			
		||||
 | 
			
		||||
    model.to(args.device)
 | 
			
		||||
 | 
			
		||||
    logger.info("Training/evaluation parameters %s", args)
 | 
			
		||||
 | 
			
		||||
    # Training
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
 | 
			
		||||
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
 | 
			
		||||
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    # Save the trained model and the tokenizer
 | 
			
		||||
    if args.local_rank == -1 or torch.distributed.get_rank() == 0:
 | 
			
		||||
        # Create output directory if needed
 | 
			
		||||
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
 | 
			
		||||
            os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
        logger.info("Saving model checkpoint to %s", args.output_dir)
 | 
			
		||||
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
 | 
			
		||||
        # They can then be reloaded using `from_pretrained()`
 | 
			
		||||
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
 | 
			
		||||
        model_to_save.save_pretrained(args.output_dir)
 | 
			
		||||
        tokenizer.save_pretrained(args.output_dir)
 | 
			
		||||
 | 
			
		||||
        # Good practice: save your training arguments together with the trained model
 | 
			
		||||
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
 | 
			
		||||
 | 
			
		||||
        # Load a trained model and vocabulary that you have fine-tuned
 | 
			
		||||
        model = model_class.from_pretrained(args.output_dir)
 | 
			
		||||
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
 | 
			
		||||
        model.to(args.device)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
 | 
			
		||||
    results = {}
 | 
			
		||||
    if args.do_eval and args.local_rank in [-1, 0]:
 | 
			
		||||
        if args.do_train:
 | 
			
		||||
            checkpoints = [args.output_dir]
 | 
			
		||||
        else:
 | 
			
		||||
            # if do_train is False and do_eval is true, load model directly from pretrained.
 | 
			
		||||
            checkpoints = [args.model_name_or_path]
 | 
			
		||||
 | 
			
		||||
        if args.eval_all_checkpoints:
 | 
			
		||||
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
 | 
			
		||||
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce model loading logs
 | 
			
		||||
 | 
			
		||||
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
 | 
			
		||||
 | 
			
		||||
        for checkpoint in checkpoints:
 | 
			
		||||
            # Reload the model
 | 
			
		||||
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
 | 
			
		||||
            model = model_class.from_pretrained(checkpoint)
 | 
			
		||||
            tokenizer = tokenizer_class.from_pretrained(checkpoint)
 | 
			
		||||
            model.to(args.device)
 | 
			
		||||
 | 
			
		||||
            # Evaluate
 | 
			
		||||
            result = evaluate(args, model, tokenizer, prefix=global_step)
 | 
			
		||||
 | 
			
		||||
            result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
 | 
			
		||||
            results.update(result)
 | 
			
		||||
 | 
			
		||||
    logger.info("Results: {}".format(results))
 | 
			
		||||
 | 
			
		||||
    return results
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    main()
 | 
			
		||||
@ -28,7 +28,7 @@ import math
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer
 | 
			
		||||
from transformers import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
@ -113,8 +113,8 @@ def main():
 | 
			
		||||
        with torch.no_grad():
 | 
			
		||||
            mems = None
 | 
			
		||||
            for idx, (data, target, seq_len) in enumerate(eval_iter):
 | 
			
		||||
                ret = model(data, target, mems)
 | 
			
		||||
                loss, mems = ret
 | 
			
		||||
                ret = model(data, lm_labels=target, mems=mems)
 | 
			
		||||
                loss, _, mems = ret
 | 
			
		||||
                loss = loss.mean()
 | 
			
		||||
                total_loss += seq_len * loss.item()
 | 
			
		||||
                total_len += seq_len
 | 
			
		||||
							
								
								
									
115	examples/distillation/README.md (new file)
@ -0,0 +1,115 @@
 | 
			
		||||
# DistilBERT
 | 
			
		||||
 | 
			
		||||
This folder contains the original code used to train DistilBERT as well as examples showcasing how to use DistilBERT.
 | 
			
		||||
 | 
			
		||||
**2019, September 19th - Update:** We fixed bugs in the code and released an updated version of the weights, trained with a modification of the distillation loss. DistilBERT now reaches 97% of `BERT-base`'s performance on GLUE, and an 86.9 F1 score on the SQuAD v1.1 dev set (compared to 88.5 for `BERT-base`). We will publish a formal write-up of our approach in the near future!
 | 
			
		||||
 | 
			
		||||
## What is DistilBERT
 | 
			
		||||
 | 
			
		||||
DistilBERT stands for Distilled-BERT. DistilBERT is a small, fast, cheap and light Transformer model based on the BERT architecture. It has 40% fewer parameters than `bert-base-uncased` and runs 60% faster, while preserving 97% of BERT's performance as measured on the GLUE language understanding benchmark. DistilBERT is trained using knowledge distillation, a technique that compresses a large model, called the teacher, into a smaller model, called the student. By distilling BERT, we obtain a smaller Transformer model that bears a lot of similarities with the original BERT model while being lighter, smaller and faster to run. DistilBERT is thus an interesting option for putting large-scale trained Transformer models into production.
 | 
			
		||||
 | 
			
		||||
For more information on DistilBERT, please refer to our [detailed blog post](https://medium.com/huggingface/smaller-faster-cheaper-lighter-introducing-distilbert-a-distilled-version-of-bert-8cf3380435b5). *Please note that we will publish a formal write-up with updated and more complete results in the near future (September 19th).*
 | 
			
		||||
 | 
			
		||||
Here's the updated results on the dev sets of GLUE:
 | 
			
		||||
 | 
			
		||||
| Model      | Macro-score | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST-2 | STS-B | WNLI |
 | 
			
		||||
| :---:      |    :---:    | :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:|
 | 
			
		||||
| BERT-base  |  **77.6**   | 48.9 | 84.3 | 88.6 | 89.3 | 89.5 | 71.3 | 91.7 | 91.2 | 43.7 |
 | 
			
		||||
| DistilBERT |  **75.2**   | 49.1 | 81.8 | 90.2 | 87.0 | 89.2 | 62.9 | 92.7 | 90.7 | 44.4 |
 | 
			
		||||
 | 
			
		||||
## Setup
 | 
			
		||||
 | 
			
		||||
This part of the library has only been tested with Python 3.6+. There are a few specific dependencies to install before launching a distillation; you can install them with the command `pip install -r requirements.txt`.
 | 
			
		||||
 | 
			
		||||
**Important note:** The training scripts have been updated to support PyTorch v1.2.0 (there are breaking changes compared to v1.1.0). Note that there is a small internal bug in the current version of PyTorch available on pip that causes a memory leak in our training/distillation. It has recently been fixed and will likely be integrated into the next release. For the moment, we recommend [compiling PyTorch from source](https://github.com/pytorch/pytorch#from-source). Please refer to [issue 1179](https://github.com/huggingface/transformers/issues/1179) for more details.
 | 
			
		||||
 | 
			
		||||
## How to use DistilBERT
 | 
			
		||||
 | 
			
		||||
Transformers includes two pre-trained DistilBERT models, currently only provided for English (we are investigating the possibility of training and releasing a multilingual version of DistilBERT):
 | 
			
		||||
 | 
			
		||||
- `distilbert-base-uncased`: DistilBERT English language model pretrained on the same data used to pretrain BERT (a concatenation of the Toronto Book Corpus and full English Wikipedia) using distillation with the supervision of the `bert-base-uncased` version of BERT. The model has 6 layers, a hidden size of 768 and 12 heads, totaling 66M parameters.
 | 
			
		||||
- `distilbert-base-uncased-distilled-squad`: A version of `distilbert-base-uncased` finetuned using (a second step of) knowledge distillation on SQuAD 1.0. This model reaches an F1 score of 86.9 on the dev set (for comparison, the `bert-base-uncased` version of BERT reaches an 88.5 F1 score).
 | 
			
		||||
 | 
			
		||||
Using DistilBERT is very similar to using BERT. DistilBERT shares the same tokenizer as BERT's `bert-base-uncased`, but we also expose it under the `DistilBertTokenizer` name to keep the naming consistent across the library's models.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
from transformers import DistilBertTokenizer, DistilBertModel

tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
 | 
			
		||||
model = DistilBertModel.from_pretrained('distilbert-base-uncased')
 | 
			
		||||
 | 
			
		||||
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)
 | 
			
		||||
outputs = model(input_ids)
 | 
			
		||||
last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
 | 
			
		||||
```
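For the SQuAD-finetuned checkpoint, question answering works the same way. The snippet below is a minimal sketch (not part of the original README): it assumes the question-answering head returns `(start_logits, end_logits)` as the first two elements of the output tuple, and the question/context strings are made up for illustration.

```python
# Minimal sketch: span extraction with the SQuAD-distilled checkpoint.
import torch
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering

tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad')
model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased-distilled-squad')
model.eval()

question = "What does DistilBERT distill?"
context = "DistilBERT is distilled from the bert-base-uncased checkpoint of BERT."
input_ids = torch.tensor(tokenizer.encode(question, context, add_special_tokens=True)).unsqueeze(0)

with torch.no_grad():
    start_logits, end_logits = model(input_ids)[:2]

# Naive decoding: take the highest-scoring start and end positions.
start = start_logits.argmax(dim=1).item()
end = end_logits.argmax(dim=1).item()
print(tokenizer.decode(input_ids[0, start:end + 1].tolist()))
```

As with the base model above, the exact content of the output tuple can vary between library versions, so check the model documentation if the unpacking fails.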
 | 
			
		||||
 | 
			
		||||
## How to train DistilBERT
 | 
			
		||||
 | 
			
		||||
In the following, we will explain how you can train your own compressed model.
 | 
			
		||||
 | 
			
		||||
### A. Preparing the data
 | 
			
		||||
 | 
			
		||||
The weights we release are trained using a concatenation of Toronto Book Corpus and English Wikipedia (same training data as the English version of BERT).
 | 
			
		||||
 | 
			
		||||
To avoid processing the data several times, we do it once before training. From now on, we will assume that you have a text file `dump.txt` which contains one sequence per line (a sequence being composed of one or several coherent sentences).
 | 
			
		||||
 | 
			
		||||
First, we will binarize the data, i.e. tokenize the data and convert each token to an index in our model's vocabulary.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python scripts/binarized_data.py \
 | 
			
		||||
    --file_path data/dump.txt \
 | 
			
		||||
    --bert_tokenizer bert-base-uncased \
 | 
			
		||||
    --dump_file data/binarized_text
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Our implementation of the masked language modeling loss follows [XLM](https://github.com/facebookresearch/XLM)'s and smooths the masking probability with a factor that puts more emphasis on rare words. We therefore count the occurrences of each token in the data:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python scripts/token_counts.py \
 | 
			
		||||
    --data_file data/binarized_text.bert-base-uncased.pickle \
 | 
			
		||||
    --token_counts_dump data/token_counts.bert-base-uncased.pickle
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### B. Training
 | 
			
		||||
 | 
			
		||||
Training with distillation is really simple once you have pre-processed the data:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python train.py \
 | 
			
		||||
    --dump_path serialization_dir/my_first_training \
 | 
			
		||||
    --data_file data/binarized_text.bert-base-uncased.pickle \
 | 
			
		||||
    --token_counts data/token_counts.bert-base-uncased.pickle \
 | 
			
		||||
    --force # overwrites the `dump_path` if it already exists.
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
By default, this will launch training on a single GPU (even if more are available on the cluster). Other parameters are available on the command line; please look in `train.py` or run `python train.py --help` to list them.
 | 
			
		||||
 | 
			
		||||
We highly encourage you to use distributed training for training DistilBERT, as the training corpus is quite large. Here's an example that runs distributed training on a single node with 4 GPUs:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
export NODE_RANK=0
 | 
			
		||||
export N_NODES=1
 | 
			
		||||
 | 
			
		||||
export N_GPU_NODE=4
 | 
			
		||||
export WORLD_SIZE=4
 | 
			
		||||
export MASTER_PORT=<AN_OPEN_PORT>
 | 
			
		||||
export MASTER_ADDR=<I.P.>
 | 
			
		||||
 | 
			
		||||
pkill -f 'python -u train.py'
 | 
			
		||||
 | 
			
		||||
python -m torch.distributed.launch \
 | 
			
		||||
    --nproc_per_node=$N_GPU_NODE \
 | 
			
		||||
    --nnodes=$N_NODES \
 | 
			
		||||
    --node_rank $NODE_RANK \
 | 
			
		||||
    --master_addr $MASTER_ADDR \
 | 
			
		||||
    --master_port $MASTER_PORT \
 | 
			
		||||
    train.py \
 | 
			
		||||
        --force \
 | 
			
		||||
        --n_gpu $WORLD_SIZE \
 | 
			
		||||
        --data_file data/binarized_text.bert-base-uncased.pickle \
 | 
			
		||||
        --token_counts data/token_counts.bert-base-uncased.pickle \
 | 
			
		||||
        --dump_path serialization_dir/my_first_distillation
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
**Tip:** Starting the distillation training with a good initialization of the model weights is crucial to reach decent performance. In our experiments, we initialized our model from a few layers of the teacher (BERT) itself! Please refer to `scripts/extract_for_distil.py` to create a valid initialization checkpoint, and use the `--from_pretrained_weights` and `--from_pretrained_config` arguments to use this initialization for the distillation training, as sketched below.
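As a rough sketch of that workflow (the file names below are placeholders, and the extraction script's own arguments are not shown since they may differ from this illustration), a distillation run initialized from the teacher could look like:

```bash
# Placeholder paths for illustration only -- see scripts/extract_for_distil.py
# for the actual extraction interface.
python scripts/extract_for_distil.py   # writes e.g. serialization_dir/bert_extracted_weights.pth

python train.py \
    --force \
    --dump_path serialization_dir/distilbert_from_teacher \
    --data_file data/binarized_text.bert-base-uncased.pickle \
    --token_counts data/token_counts.bert-base-uncased.pickle \
    --from_pretrained_weights serialization_dir/bert_extracted_weights.pth \
    --from_pretrained_config distilbert_config.json   # hypothetical config file
```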
 | 
			
		||||
 | 
			
		||||
Happy distillation!
 | 
			
		||||
							
								
								
									
201	examples/distillation/dataset.py (new file)
@ -0,0 +1,201 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Dataloaders to train DistilBERT
 | 
			
		||||
    adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
 | 
			
		||||
"""
 | 
			
		||||
from typing import List
 | 
			
		||||
import math
 | 
			
		||||
from itertools import chain
 | 
			
		||||
from collections import Counter
 | 
			
		||||
import numpy as np
 | 
			
		||||
import torch
 | 
			
		||||
 | 
			
		||||
from utils import logger
 | 
			
		||||
 | 
			
		||||
class Dataset:
 | 
			
		||||
    def __init__(self,
 | 
			
		||||
                 params,
 | 
			
		||||
                 data):
 | 
			
		||||
        self.params = params
 | 
			
		||||
        self.tokens_per_batch = params.tokens_per_batch
 | 
			
		||||
        self.batch_size = params.batch_size
 | 
			
		||||
        self.shuffle = params.shuffle
 | 
			
		||||
        self.group_by_size = params.group_by_size
 | 
			
		||||
 | 
			
		||||
        self.token_ids = np.array(data)
 | 
			
		||||
        self.lengths = np.uint16([len(t) for t in data])
 | 
			
		||||
 | 
			
		||||
        self.check()
 | 
			
		||||
        self.remove_long_sequences()
 | 
			
		||||
        self.remove_empty_sequences()
 | 
			
		||||
        self.check()
 | 
			
		||||
        self.print_statistics()
 | 
			
		||||
 | 
			
		||||
    def __len__(self):
 | 
			
		||||
        return len(self.lengths)
 | 
			
		||||
 | 
			
		||||
    def check(self):
 | 
			
		||||
        """
 | 
			
		||||
        Some sanity checks
 | 
			
		||||
        """
 | 
			
		||||
        assert len(self.token_ids) == len(self.lengths)
 | 
			
		||||
 | 
			
		||||
    def remove_long_sequences(self):
 | 
			
		||||
        """
 | 
			
		||||
        Sequences that are too long are split into chunks of max_position_embeddings.
 | 
			
		||||
        """
 | 
			
		||||
        indices = self.lengths >= self.params.max_position_embeddings
 | 
			
		||||
        logger.info(f'Splitting {sum(indices)} too long sequences.')
 | 
			
		||||
 | 
			
		||||
        def divide_chunks(l, n):
 | 
			
		||||
            return [l[i:i + n] for i in range(0, len(l), n)]
 | 
			
		||||
 | 
			
		||||
        new_tok_ids = []
 | 
			
		||||
        new_lengths = []
 | 
			
		||||
        cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
 | 
			
		||||
        max_len = self.params.max_position_embeddings
 | 
			
		||||
 | 
			
		||||
        for seq_, len_ in zip(self.token_ids, self.lengths):
 | 
			
		||||
            if len_ <= max_len:
 | 
			
		||||
                new_tok_ids.append(seq_)
 | 
			
		||||
                new_lengths.append(len_)
 | 
			
		||||
            else:
 | 
			
		||||
                sub_seqs = []
 | 
			
		||||
                for sub_s in divide_chunks(seq_, max_len-2):
 | 
			
		||||
                    if sub_s[0] != cls_id:
 | 
			
		||||
                        sub_s = np.insert(sub_s, 0, cls_id)
 | 
			
		||||
                    if sub_s[-1] != sep_id:
 | 
			
		||||
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
 | 
			
		||||
                    assert len(sub_s) <= max_len
 | 
			
		||||
                    sub_seqs.append(sub_s)
 | 
			
		||||
 | 
			
		||||
                new_tok_ids.extend(sub_seqs)
 | 
			
		||||
                new_lengths.extend([len(l) for l in sub_seqs])
 | 
			
		||||
 | 
			
		||||
        self.token_ids = np.array(new_tok_ids)
 | 
			
		||||
        self.lengths = np.array(new_lengths)
 | 
			
		||||
 | 
			
		||||
    def remove_empty_sequences(self):
 | 
			
		||||
        """
 | 
			
		||||
        Sequences that are too short are simply removed. This could be tuned.
 | 
			
		||||
        """
 | 
			
		||||
        init_size = len(self)
 | 
			
		||||
        indices = self.lengths > 11
 | 
			
		||||
        self.token_ids = self.token_ids[indices]
 | 
			
		||||
        self.lengths = self.lengths[indices]
 | 
			
		||||
        new_size = len(self)
 | 
			
		||||
        logger.info(f'Removed {init_size - new_size} too short (<=11 tokens) sequences.')
 | 
			
		||||
 | 
			
		||||
    def print_statistics(self):
 | 
			
		||||
        """
 | 
			
		||||
        Print some statistics on the corpus. Only the master process.
 | 
			
		||||
        """
 | 
			
		||||
        if not self.params.is_master:
 | 
			
		||||
            return
 | 
			
		||||
        logger.info(f'{len(self)} sequences')
 | 
			
		||||
        # data_len = sum(self.lengths)
 | 
			
		||||
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
 | 
			
		||||
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
 | 
			
		||||
 | 
			
		||||
        # unk_idx = self.params.special_tok_ids['unk_token']
 | 
			
		||||
        # nb_unkown = sum([(t==unk_idx).sum() for t in self.token_ids])
 | 
			
		||||
        # logger.info(f'{nb_unkown} unknown tokens (covering {100*nb_unkown/data_len:.2f}% of the data)')
 | 
			
		||||
 | 
			
		||||
    def select_data(self, a: int, b: int):
 | 
			
		||||
        """
 | 
			
		||||
        Select a subportion of the data.
 | 
			
		||||
        """
 | 
			
		||||
        n_sequences = len(self)
 | 
			
		||||
        assert 0 <= a < b <= n_sequences, ValueError(f'`0 <= a < b <= n_sequences` is not met with a={a} and b={b}')
 | 
			
		||||
 | 
			
		||||
        logger.info(f'Selecting sequences from {a} to {b} (excluded).')
 | 
			
		||||
        self.token_ids = self.token_ids[a:b]
 | 
			
		||||
        self.lengths = self.lengths[a:b]
 | 
			
		||||
 | 
			
		||||
        self.check()
 | 
			
		||||
 | 
			
		||||
    def split(self):
 | 
			
		||||
        """
 | 
			
		||||
        Distributed training: split the data accross the processes.
 | 
			
		||||
        """
 | 
			
		||||
        assert self.params.n_gpu > 1
 | 
			
		||||
        logger.info('Splitting the data accross the processuses.')
 | 
			
		||||
        n_seq = len(self)
 | 
			
		||||
        n_seq_per_procesus = n_seq // self.params.world_size
 | 
			
		||||
        a = n_seq_per_procesus * self.params.global_rank
 | 
			
		||||
        b = a + n_seq_per_procesus
 | 
			
		||||
        self.select_data(a=a, b=b)
 | 
			
		||||
 | 
			
		||||
    def batch_sequences(self,
 | 
			
		||||
                        token_ids: List[List[int]],
 | 
			
		||||
                        lengths: List[int]):
 | 
			
		||||
        """
 | 
			
		||||
        Do the padding and transform into torch.tensor.
 | 
			
		||||
        """
 | 
			
		||||
        assert len(token_ids) == len(lengths)
 | 
			
		||||
 | 
			
		||||
        # Max for paddings
 | 
			
		||||
        max_seq_len_ = max(lengths)
 | 
			
		||||
 | 
			
		||||
        # Pad token ids
 | 
			
		||||
        pad_idx = self.params.special_tok_ids['pad_token']
 | 
			
		||||
        tk_ = [list(t.astype(int)) + [pad_idx]*(max_seq_len_-len(t)) for t in token_ids]
 | 
			
		||||
        assert len(tk_) == len(token_ids)
 | 
			
		||||
        assert all(len(t) == max_seq_len_ for t in tk_)
 | 
			
		||||
 | 
			
		||||
        tk_t = torch.tensor(tk_)                  # (bs, max_seq_len_)
 | 
			
		||||
        lg_t = torch.tensor(lengths.astype(int))  # (bs)
 | 
			
		||||
        return tk_t, lg_t
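# --- Illustrative aside (not part of the original file): the padding performed by
# --- batch_sequences on a toy batch, assuming a hypothetical pad id of 0.
import numpy as np
import torch

pad_idx = 0
token_ids = [np.array([101, 7, 102]), np.array([101, 7, 8, 9, 102])]
lengths = np.array([3, 5])
max_seq_len_ = max(lengths)
tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
print(torch.tensor(tk_))
# tensor([[101,   7, 102,   0,   0],
#         [101,   7,   8,   9, 102]])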

    def get_batches_iterator(self,
                             batches):
        """
        Return an iterator over batches.
        """
        for sequences_ids in batches:
            token_ids, lengths = self.batch_sequences(self.token_ids[sequences_ids],
                                                      self.lengths[sequences_ids])
            yield (token_ids, lengths)

    def get_iterator(self,
                     seed: int = None):
        """
        Return a data iterator.
        """
        rng = np.random.RandomState(seed)

        n_sequences = len(self)
        indices = np.arange(n_sequences)

        if self.group_by_size:
            indices = indices[np.argsort(self.lengths[indices], kind='mergesort')]

        if self.tokens_per_batch == -1:
            batches = np.array_split(indices, math.ceil(len(indices) * 1. / self.batch_size))
        else:
            assert self.tokens_per_batch > 0
            batch_ids = np.cumsum(self.lengths[indices]) // self.tokens_per_batch
            _, bounds = np.unique(batch_ids, return_index=True)
            batches = [indices[bounds[i]:bounds[i + 1]] for i in range(len(bounds) - 1)]
            if bounds[-1] < len(indices):
                batches.append(indices[bounds[-1]:])

        if self.shuffle:
            rng.shuffle(batches)

        assert n_sequences == sum([len(x) for x in batches])
        assert self.lengths[indices].sum() == sum([self.lengths[x].sum() for x in batches])

        return self.get_batches_iterator(batches=batches)
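A small sketch (illustration only, not part of the diff) of the tokens_per_batch grouping above: sequences already sorted by length are cut wherever the cumulative token count crosses a multiple of the budget, so each batch holds roughly tokens_per_batch tokens.

import numpy as np

lengths = np.array([3, 4, 5, 6, 9])     # toy lengths, already sorted
tokens_per_batch = 10                   # hypothetical budget

indices = np.arange(len(lengths))
batch_ids = np.cumsum(lengths) // tokens_per_batch        # [0, 0, 1, 1, 2]
_, bounds = np.unique(batch_ids, return_index=True)       # [0, 2, 4]
batches = [indices[bounds[i]:bounds[i + 1]] for i in range(len(bounds) - 1)]
if bounds[-1] < len(indices):
    batches.append(indices[bounds[-1]:])
print([b.tolist() for b in batches])
# [[0, 1], [2, 3], [4]] -> roughly 7, 11 and 9 tokens per batch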

examples/distillation/distiller.py (new file, 490 lines)
@@ -0,0 +1,490 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The distiller to distil DistilBERT
    adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import os
import math
import psutil
import time
from tensorboardX import SummaryWriter
from tqdm import trange, tqdm
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW

from transformers import WarmupLinearSchedule

from utils import logger
from dataset import Dataset

class Distiller:
    def __init__(self,
                 params: dict,
                 dataloader: Dataset,
                 token_probs: torch.tensor,
                 student: nn.Module,
                 teacher: nn.Module):
        logger.info('Initializing Distiller')
        self.params = params
        self.dump_path = params.dump_path
        self.multi_gpu = params.multi_gpu
        self.fp16 = params.fp16

        self.student = student
        self.teacher = teacher

        self.dataloader = dataloader
        if self.params.n_gpu > 1:
            self.dataloader.split()
        self.get_iterator(seed=params.seed)

        self.temperature = params.temperature
        assert self.temperature > 0.

        self.alpha_ce = params.alpha_ce
        self.alpha_mlm = params.alpha_mlm
        self.alpha_mse = params.alpha_mse
        self.alpha_cos = params.alpha_cos
        assert self.alpha_ce >= 0.
        assert self.alpha_mlm >= 0.
        assert self.alpha_mse >= 0.
        assert self.alpha_cos >= 0.
        assert self.alpha_ce + self.alpha_mlm + self.alpha_mse + self.alpha_cos > 0.

        self.mlm_mask_prop = params.mlm_mask_prop
        assert 0.0 <= self.mlm_mask_prop <= 1.0
        assert params.word_mask + params.word_keep + params.word_rand == 1.0
        self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand])
        self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs
        self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs
        if self.fp16:
            self.pred_probs = self.pred_probs.half()
            self.token_probs = self.token_probs.half()

        self.epoch = 0
        self.n_iter = 0
        self.n_total_iter = 0
        self.n_sequences_epoch = 0
        self.total_loss_epoch = 0
        self.last_loss = 0
        self.last_loss_ce = 0
        self.last_loss_mlm = 0
        if self.alpha_mse > 0.: self.last_loss_mse = 0
        if self.alpha_cos > 0.: self.last_loss_cos = 0
        self.last_log = 0

        self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean')
        self.mlm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        if self.alpha_mse > 0.:
            self.mse_loss_fct = nn.MSELoss(reduction='sum')
        if self.alpha_cos > 0.:
            self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean')

        logger.info('--- Initializing model optimizer')
        assert params.gradient_accumulation_steps >= 1
        self.num_steps_epoch = int(len(self.dataloader) / params.batch_size) + 1
        num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1

        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay},
            {'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}
        ]
        logger.info("------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]))
        logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()]))
        self.optimizer = AdamW(optimizer_grouped_parameters,
                               lr=params.learning_rate,
                               eps=params.adam_epsilon,
                               betas=(0.9, 0.98))

        warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop)
        self.scheduler = WarmupLinearSchedule(self.optimizer,
                                              warmup_steps=warmup_steps,
                                              t_total=num_train_optimization_steps)

        if self.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level")
            self.student, self.optimizer = amp.initialize(self.student,
                                                          self.optimizer,
                                                          opt_level=self.params.fp16_opt_level)
            self.teacher = self.teacher.half()

        if self.multi_gpu:
            if self.fp16:
                from apex.parallel import DistributedDataParallel
                logger.info("Using apex.parallel.DistributedDataParallel for distributed training.")
                self.student = DistributedDataParallel(self.student)
            else:
                from torch.nn.parallel import DistributedDataParallel
                logger.info("Using nn.parallel.DistributedDataParallel for distributed training.")
                self.student = DistributedDataParallel(self.student,
                                                       device_ids=[params.local_rank],
                                                       output_device=params.local_rank)

        self.is_master = params.is_master
        if self.is_master:
            logger.info('--- Initializing Tensorboard')
            self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train'))
            self.tensorboard.add_text(tag='config', text_string=str(self.params), global_step=0)

    def get_iterator(self,
                     seed: int = None):
        """
        Initialize the data iterator.
        Each process has its own data iterator (iterating over its own random portion of the dataset).

        Input:
        ------
            seed: `int` - The random seed.
        """
        logger.info('--- Initializing Data Iterator')
        self.data_iterator = self.dataloader.get_iterator(seed=seed)

    def get_batch(self):
        """
        Call the data iterator to output a new batch.
        If the data iterator went through the whole dataset, create a new iterator.
        """
        assert hasattr(self, 'data_iterator')
        try:
            x = next(self.data_iterator)
        except StopIteration:
            logger.warning('--- Went through the whole dataset. Creating new data iterator.')
            self.data_iterator = self.dataloader.get_iterator()
            x = next(self.data_iterator)
        return x

    def prepare_batch(self,
                      batch):
        """
        Prepare the batch: from the token_ids and the lengths, compute the attention mask and the masked labels for MLM.

        Input:
        ------
            batch: `Tuple`
                token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequences. It is padded.
                lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.

        Output:
        -------
            token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.
            attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.
            mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels. There is a -1 where there is nothing to predict.
        """
        token_ids, lengths = batch
        token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)
        assert token_ids.size(0) == lengths.size(0)

        attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])

        bs, max_seq_len = token_ids.size()
        mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids)

        x_prob = self.token_probs[token_ids.flatten()]
        n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item())
        tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False)
        pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility
        pred_mask[tgt_ids] = 1
        pred_mask = pred_mask.view(bs, max_seq_len)

        pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0

        # mask a number of words == 0 [8] (faster with fp16)
        if self.fp16:
            n1 = pred_mask.sum().item()
            if n1 > 8:
                pred_mask = pred_mask.view(-1)
                n2 = max(n1 % 8, 8 * (n1 // 8))
                if n2 != n1:
                    pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0
                pred_mask = pred_mask.view(bs, max_seq_len)
                assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item()

        _token_ids_real = token_ids[pred_mask]
        _token_ids_rand = _token_ids_real.clone().random_(self.params.vocab_size)
        _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token'])
        probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True)
        _token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long()
        token_ids = token_ids.masked_scatter(pred_mask, _token_ids)

        mlm_labels[~pred_mask] = -1 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility

        return token_ids, attn_mask, mlm_labels
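# --- Illustrative aside (not part of the original file): the mask/keep/random split applied
# --- to the selected positions, with the default word_mask/word_keep/word_rand = 0.8/0.1/0.1
# --- and a hypothetical [MASK] id of 103.
import torch

pred_probs = torch.FloatTensor([0.8, 0.1, 0.1])            # mask / keep / random
_token_ids_real = torch.tensor([2054, 2003, 1996, 3007])   # toy selected token ids
vocab_size, mask_id = 30522, 103

_token_ids_rand = _token_ids_real.clone().random_(vocab_size)
_token_ids_mask = _token_ids_real.clone().fill_(mask_id)
probs = torch.multinomial(pred_probs, len(_token_ids_real), replacement=True)
# probs == 0 -> replace by [MASK], 1 -> keep the real token, 2 -> replace by a random token
_token_ids = _token_ids_mask * (probs == 0).long() \
           + _token_ids_real * (probs == 1).long() \
           + _token_ids_rand * (probs == 2).long()
print(probs.tolist(), _token_ids.tolist())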

    def round_batch(self,
                    x: torch.tensor,
                    lengths: torch.tensor):
        """
        For float16 only.
        Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8.

        Input:
        ------
            x: `torch.tensor(bs, seq_length)` - The token ids.
            lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.

        Output:
        -------
            x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids.
            lengths: `torch.tensor(new_bs)` - The updated lengths.
        """
        if not self.fp16 or len(lengths) < 8:
            return x, lengths

        # number of sentences == 0 [8]
        bs1 = len(lengths)
        bs2 = 8 * (bs1 // 8)
        assert bs2 > 0 and bs2 % 8 == 0
        if bs1 != bs2:
            idx = torch.randperm(bs1)[:bs2]
            lengths = lengths[idx]
            slen = lengths.max().item()
            x = x[idx, :slen]
        else:
            idx = None

        # sequence length == 0 [8]
        ml1 = x.size(1)
        if ml1 % 8 != 0:
            pad = 8 - (ml1 % 8)
            ml2 = ml1 + pad
            pad_id = self.params.special_tok_ids['pad_token']
            padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id)
            x = torch.cat([x, padding_tensor], 1)
            assert x.size() == (bs2, ml2)

        assert x.size(0) % 8 == 0
        assert x.size(1) % 8 == 0
        return x, lengths
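# --- Illustrative aside (not part of the original file): why the sequence dimension is padded
# --- up to a multiple of 8. Tensor cores are used most efficiently when matmul dimensions are
# --- multiples of 8, so a (16, 13) batch gets padded to (16, 16) with the pad id (0 here, hypothetical).
import torch

pad_id = 0
x = torch.randint(5, 100, (16, 13))      # bs is already a multiple of 8, seq_length = 13
ml1 = x.size(1)
if ml1 % 8 != 0:
    pad = 8 - (ml1 % 8)                  # 3 extra columns
    padding_tensor = torch.zeros(x.size(0), pad, dtype=torch.long).fill_(pad_id)
    x = torch.cat([x, padding_tensor], 1)
print(x.size())                          # torch.Size([16, 16])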

    def train(self):
        """
        The real training loop.
        """
        if self.is_master: logger.info('Starting training')
        self.last_log = time.time()
        self.student.train()
        self.teacher.eval()

        for _ in range(self.params.n_epoch):
            if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}')
            if self.multi_gpu:
                torch.distributed.barrier()

            iter_bar = trange(self.num_steps_epoch, desc="-Iter", disable=self.params.local_rank not in [-1, 0])
            for __ in range(self.num_steps_epoch):
                batch = self.get_batch()
                if self.params.n_gpu > 0:
                    batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch)
                token_ids, attn_mask, mlm_labels = self.prepare_batch(batch=batch)

                self.step(input_ids=token_ids, attention_mask=attn_mask, mlm_labels=mlm_labels)

                iter_bar.update()
                iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}',
                                      'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'})
            iter_bar.close()

            if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}')
            self.end_epoch()

        if self.is_master:
            logger.info('Save very last checkpoint as `pytorch_model.bin`.')
            self.save_checkpoint(checkpoint_name='pytorch_model.bin')
            logger.info('Training is finished')

    def step(self,
             input_ids: torch.tensor,
             attention_mask: torch.tensor,
             mlm_labels: torch.tensor):
        """
        One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation),
        and possibly a parameter update (depending on the gradient accumulation).

        Input:
        ------
        input_ids: `torch.tensor(bs, seq_length)` - The token ids.
        attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention.
        mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels.
        """
        s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask)     # (bs, seq_length, voc_size)
        with torch.no_grad():
            t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size)
        assert s_logits.size() == t_logits.size()

        # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
        # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2
        if self.params.restrict_ce_to_mask:
            mask = (mlm_labels > -1).unsqueeze(-1).expand_as(s_logits)  # (bs, seq_length, voc_size)
        else:
            mask = attention_mask.unsqueeze(-1).expand_as(s_logits)     # (bs, seq_length, voc_size)
        s_logits_slct = torch.masked_select(s_logits, mask)             # (bs * seq_length * voc_size) modulo the 1s in mask
        s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1))       # (bs * seq_length, voc_size) modulo the 1s in mask
        t_logits_slct = torch.masked_select(t_logits, mask)             # (bs * seq_length * voc_size) modulo the 1s in mask
        t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1))       # (bs * seq_length, voc_size) modulo the 1s in mask
        assert t_logits_slct.size() == s_logits_slct.size()

        loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1),
                                   F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2
        loss = self.alpha_ce*loss_ce
        if self.alpha_mlm > 0.:
            loss_mlm = self.mlm_loss_fct(s_logits.view(-1, s_logits.size(-1)), mlm_labels.view(-1))
            loss += self.alpha_mlm * loss_mlm
        if self.alpha_mse > 0.:
            loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0) # Reproducing batchmean reduction
            loss += self.alpha_mse * loss_mse

        if self.alpha_cos > 0.:
            s_hidden_states = s_hidden_states[-1]                              # (bs, seq_length, dim)
            t_hidden_states = t_hidden_states[-1]                              # (bs, seq_length, dim)
            mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states)     # (bs, seq_length, dim)
            assert s_hidden_states.size() == t_hidden_states.size()
            dim = s_hidden_states.size(-1)

            s_hidden_states_slct = torch.masked_select(s_hidden_states, mask)        # (bs * seq_length * dim)
            s_hidden_states_slct = s_hidden_states_slct.view(-1, dim)                # (bs * seq_length, dim)
            t_hidden_states_slct = torch.masked_select(t_hidden_states, mask)        # (bs * seq_length * dim)
            t_hidden_states_slct = t_hidden_states_slct.view(-1, dim)                # (bs * seq_length, dim)

            target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,)
            loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target)
            loss += self.alpha_cos * loss_cos

        self.total_loss_epoch += loss.item()
        self.last_loss = loss.item()
        self.last_loss_ce = loss_ce.item()
        if self.alpha_mlm > 0.:
            self.last_loss_mlm = loss_mlm.item()
        if self.alpha_mse > 0.:
            self.last_loss_mse = loss_mse.item()
        if self.alpha_cos > 0.:
            self.last_loss_cos = loss_cos.item()

        self.optimize(loss)

        self.n_sequences_epoch += input_ids.size(0)
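# --- Illustrative aside (not part of the original file): the distillation term above is the
# --- temperature-scaled KL divergence, loss_ce = T**2 * KL(softmax(t/T) || softmax(s/T)),
# --- averaged over the selected positions; the T**2 factor keeps its gradient magnitude
# --- roughly independent of the temperature. A minimal numeric check with toy logits:
import torch
import torch.nn as nn
import torch.nn.functional as F

T = 2.0
s_logits = torch.tensor([[2.0, 1.0, 0.1]])   # toy student logits
t_logits = torch.tensor([[1.5, 1.2, 0.3]])   # toy teacher logits
kl = nn.KLDivLoss(reduction='batchmean')
loss_ce = kl(F.log_softmax(s_logits / T, dim=-1),
             F.softmax(t_logits / T, dim=-1)) * T ** 2
print(loss_ce.item())   # small positive scalar; 0 only when the two distributions match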

    def optimize(self,
                 loss):
        """
        Normalization on the loss (gradient accumulation or distributed training), followed by
        backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation).
        Also update the metrics for tensorboard.
        """
        # Check for NaN
        if (loss != loss).data.any():
            logger.error('NaN detected')
            exit()

        if self.multi_gpu:
            loss = loss.mean()
        if self.params.gradient_accumulation_steps > 1:
            loss = loss / self.params.gradient_accumulation_steps

        if self.fp16:
            from apex import amp
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        self.iter()
        if self.n_iter % self.params.gradient_accumulation_steps == 0:
            if self.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm)
            self.optimizer.step()
            self.optimizer.zero_grad()
            self.scheduler.step()

    def iter(self):
        """
        Update global counts, write to tensorboard and save checkpoint.
        """
        self.n_iter += 1
        self.n_total_iter += 1

        if self.n_total_iter % self.params.log_interval == 0:
            self.log_tensorboard()
            self.last_log = time.time()
        if self.n_total_iter % self.params.checkpoint_interval == 0:
            self.save_checkpoint()

    def log_tensorboard(self):
        """
        Log into tensorboard. Only by the master process.
        """
        if not self.is_master:
            return

        for param_name, param in self.student.named_parameters():
            self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter)
            self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter)
            if param.grad is None:
                continue
            self.tensorboard.add_scalar(tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter)
            self.tensorboard.add_scalar(tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter)

        self.tensorboard.add_scalar(tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter)
        if self.alpha_mlm > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter)
        if self.alpha_mse > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter)
        if self.alpha_cos > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter)

        self.tensorboard.add_scalar(tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="global/speed", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter)

    def end_epoch(self):
        """
        Finally arrived at the end of epoch (full pass on dataset).
        Do some tensorboard logging and checkpoint saving.
        """
        logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.')

        if self.is_master:
            self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth')
            self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch)

        self.epoch += 1
        self.n_sequences_epoch = 0
        self.n_iter = 0
        self.total_loss_epoch = 0

    def save_checkpoint(self,
                        checkpoint_name: str = 'checkpoint.pth'):
        """
        Save the current state. Only by the master process.
        """
        if not self.is_master:
            return
        mdl_to_save = self.student.module if hasattr(self.student, 'module') else self.student
        mdl_to_save.config.save_pretrained(self.dump_path)
        state_dict = mdl_to_save.state_dict()
        torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))

examples/distillation/requirements.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
gitpython==3.0.2
tensorboard>=1.14.0
tensorboardX==1.8
psutil==5.6.3
scipy==1.3.1
pytorch_transformers==1.2.0

examples/distillation/scripts/binarized_data.py (new file, 86 lines)
@@ -0,0 +1,86 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training DistilBERT.
"""
import argparse
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, RobertaTokenizer
import logging

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

def main():
    parser = argparse.ArgumentParser(description="Preprocess the data to avoid re-doing it several times (tokenization + token_to_ids).")
    parser.add_argument('--file_path', type=str, default='data/dump.txt',
                        help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased',
                        help="The tokenizer to use.")
    parser.add_argument('--dump_file', type=str, default='data/dump',
                        help='The dump file prefix.')
    args = parser.parse_args()


    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
    elif args.tokenizer_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
    bos = tokenizer.special_tokens_map['bos_token'] # `[CLS]` for bert, `<s>` for roberta
    sep = tokenizer.special_tokens_map['sep_token'] # `[SEP]` for bert, `</s>` for roberta

    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()


    logger.info('Start encoding')
    logger.info(f'{len(data)} examples to process.')

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start)/interval:.2f}s/expl')
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')


    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    rslt_ = [np.uint16(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
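A small sketch (illustration only, not part of the diff) of reading the dump produced above; the token ids are stored as np.uint16 arrays, which is enough for BERT's 30522-token vocabulary (anything above 65535 would overflow):

import pickle
import numpy as np

with open('data/dump.bert-base-uncased.pickle', 'rb') as fp:   # path implied by the script's defaults
    sequences = pickle.load(fp)

print(len(sequences), sequences[0].dtype)    # number of sequences, dtype('uint16')
lengths = np.array([len(s) for s in sequences])
print(lengths.min(), lengths.max())          # the Dataset later drops sequences of <= 11 tokens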

examples/distillation/scripts/extract_for_distil.py (new file, 90 lines)
@@ -0,0 +1,90 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training DistilBERT.
"""
from transformers import BertForMaskedLM, RobertaForMaskedLM
import torch
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned Distillation")
    parser.add_argument("--model_type", default="bert", choices=["bert", "roberta"])
    parser.add_argument("--model_name", default='bert-base-uncased', type=str)
    parser.add_argument("--dump_checkpoint", default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument("--vocab_transform", action='store_true')
    args = parser.parse_args()


    if args.model_type == 'bert':
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    elif args.model_type == 'roberta':
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ['word_embeddings', 'position_embeddings']:
        compressed_sd[f'distilbert.embeddings.{w}.weight'] = \
            state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ['weight', 'bias']:
        compressed_sd[f'distilbert.embeddings.LayerNorm.{w}'] = \
            state_dict[f'{prefix}.embeddings.LayerNorm.{w}']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ['weight', 'bias']:
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}']

            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}']

            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}']
        std_idx += 1

    if args.model_type == 'bert':
        compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
        compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
        if args.vocab_transform:
            for w in ['weight', 'bias']:
                compressed_sd[f'vocab_transform.{w}'] = state_dict[f'cls.predictions.transform.dense.{w}']
                compressed_sd[f'vocab_layer_norm.{w}'] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
    elif args.model_type == 'roberta':
        compressed_sd['vocab_projector.weight'] = state_dict['lm_head.decoder.weight']
        compressed_sd['vocab_projector.bias'] = state_dict['lm_head.bias']
        if args.vocab_transform:
            for w in ['weight', 'bias']:
                compressed_sd[f'vocab_transform.{w}'] = state_dict[f'lm_head.dense.{w}']
                compressed_sd[f'vocab_layer_norm.{w}'] = state_dict[f'lm_head.layer_norm.{w}']

    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')

    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
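The six teacher layers [0, 2, 4, 7, 9, 11] are mapped onto student layers 0 to 5, so roughly one teacher layer out of two initializes each DistilBERT block. A hedged sketch (illustration only, not the actual training entry point) of how such a checkpoint could then be loaded into the student; the paths are hypothetical and mirror the script's default --dump_checkpoint:

import torch
from transformers import DistilBertConfig, DistilBertForMaskedLM

config = DistilBertConfig()                 # 6 layers, dim 768 by default
student = DistilBertForMaskedLM(config)
state_dict = torch.load('serialization_dir/tf_bert-base-uncased_0247911.pth', map_location='cpu')
# embeddings, the 6 transformer blocks and the vocab projector come from the teacher;
# strict=False tolerates keys (e.g. vocab_transform) that were not extracted
student.load_state_dict(state_dict, strict=False)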

examples/distillation/scripts/token_counts.py (new file, 51 lines)
@@ -0,0 +1,51 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training DistilBERT.
"""
from collections import Counter
import argparse
import pickle
import logging

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)")
    parser.add_argument("--data_file", type=str, default="data/dump.bert-base-uncased.pickle",
                        help="The binarized dataset.")
    parser.add_argument("--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle",
                        help="The dump file.")
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0]*args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
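These counts feed the --mlm_smoothing option of train.py: rarer tokens get a higher chance of being selected for masking. A hedged sketch of the kind of transform involved (the exponent and the clipping are illustrative assumptions about how the counts are turned into sampling weights, not a copy of train.py):

import numpy as np

counts = np.array([1000, 100, 10, 1], dtype=np.float64)   # toy token frequencies
mlm_smoothing = 0.7                                       # default of train.py
token_probs = np.maximum(counts, 1) ** -mlm_smoothing     # frequent tokens are strongly down-weighted
token_probs /= token_probs.sum()
print(token_probs.round(3))   # roughly [0.006 0.032 0.160 0.802] -> the rare token dominates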

examples/distillation/train.py (new file, 247 lines)
@@ -0,0 +1,247 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training DistilBERT.
"""
import os
import argparse
import pickle
import json
import shutil
import numpy as np
import torch

from transformers import BertTokenizer, BertForMaskedLM, RobertaTokenizer, RobertaForMaskedLM
from transformers import DistilBertForMaskedLM, DistilBertConfig

from distiller import Distiller
from utils import git_log, logger, init_gpu_params, set_seed
from dataset import Dataset


def main():
    parser = argparse.ArgumentParser(description="Training")

    parser.add_argument("--dump_path", type=str, required=True,
                        help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True,
                        help="The binarized file (tokenized + tokens_to_ids) grouped by sequence.")
    parser.add_argument("--token_counts", type=str, required=True,
                        help="The token counts in the data_file for MLM.")
    parser.add_argument("--force", action='store_true',
                        help="Overwrite dump_path if it already exists.")

    parser.add_argument("--vocab_size", default=30522, type=int,
                        help="The vocabulary size.")
    parser.add_argument("--max_position_embeddings", default=512, type=int,
                        help="Maximum sequence length we can model (including [CLS] and [SEP]).")
    parser.add_argument("--sinusoidal_pos_embds", action='store_false',
                        help="If true, the position embeddings are simply fixed with sinusoidal embeddings.")
    parser.add_argument("--n_layers", default=6, type=int,
                        help="Number of Transformer blocks.")
    parser.add_argument("--n_heads", default=12, type=int,
                        help="Number of heads in the self-attention module.")
    parser.add_argument("--dim", default=768, type=int,
                        help="Dimension through the network. Must be divisible by n_heads.")
    parser.add_argument("--hidden_dim", default=3072, type=int,
                        help="Intermediate dimension in the FFN.")
    parser.add_argument("--dropout", default=0.1, type=float,
                        help="Dropout.")
    parser.add_argument("--attention_dropout", default=0.1, type=float,
                        help="Dropout in self-attention.")
    parser.add_argument("--activation", default='gelu', type=str,
                        help="Activation to use in self-attention.")
    parser.add_argument("--tie_weights_", action='store_false',
                        help="If true, we tie the embeddings matrix with the projection over the vocabulary matrix. Default is true.")

    parser.add_argument("--from_pretrained_weights", default=None, type=str,
                        help="Load student initialization checkpoint.")
    parser.add_argument("--from_pretrained_config", default=None, type=str,
                        help="Load student initialization architecture config.")
    parser.add_argument("--teacher_type", default="bert", choices=["bert", "roberta"],
                        help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", default="bert-base-uncased", type=str,
                        help="The teacher model.")

    parser.add_argument("--temperature", default=2., type=float,
                        help="Temperature for the softmax.")
    parser.add_argument("--alpha_ce", default=0.5, type=float,
                        help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.5, type=float,
                        help="Linear weight for the MLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float,
                        help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float,
                        help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float,
                        help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float,
                        help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float,
                        help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float,
                        help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float,
                        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--restrict_ce_to_mask", action='store_true',
                        help="If true, compute the distillation loss only on the [MLM] prediction distribution.")

    parser.add_argument("--n_epoch", type=int, default=3,
                        help="Number of passes over the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5,
                        help="Batch size (for each process).")
    parser.add_argument("--tokens_per_batch", type=int, default=-1,
                        help="If specified, modify the batches so that they have approximately this number of tokens.")
    parser.add_argument("--shuffle", action='store_false',
                        help="If true, shuffle the sequence order. Default is true.")
    parser.add_argument("--group_by_size", action='store_false',
                        help="If true, group sequences that have similar length into the same batch. Default is true.")

    parser.add_argument("--gradient_accumulation_steps", type=int, default=50,
                        help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float,
                        help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float,
                        help="Random initialization range.")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1,
                        help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56,
                        help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500,
                        help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000,
                        help="Checkpoint interval.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
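The `--temperature` and `--alpha_*` flags above parameterize a soft-target distillation objective; the `Distiller` class that consumes them is not part of this diff, so the following is only a minimal sketch of how such a loss is commonly assembled (the function name and reduction choice are illustrative, not the actual implementation):

```
import torch.nn.functional as F

def soft_target_loss(s_logits, t_logits, temperature):
    # Both distributions are softened by the same temperature; the usual T**2
    # factor keeps gradient magnitudes comparable across temperature settings.
    return F.kl_div(F.log_softmax(s_logits / temperature, dim=-1),
                    F.softmax(t_logits / temperature, dim=-1),
                    reduction='batchmean') * temperature ** 2

# The objectives are then combined linearly with the weights above, e.g.:
# loss = alpha_ce * loss_ce + alpha_mlm * loss_mlm + alpha_mse * loss_mse + alpha_cos * loss_cos
```

With the defaults shown here (`alpha_ce = alpha_mlm = 0.5`, `alpha_mse = alpha_cos = 0.0`), the objective is an equal mix of the soft-target loss and the standard MLM loss.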
    ## ARGS ##
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite it. '
                                   'Use `--force` if you want to overwrite it.')
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f'Experiment will be dumped and logged in {args.dump_path}')


        ### SAVE PARAMS ###
        logger.info(f'Param: {args}')
        with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    assert (args.from_pretrained_weights is None and args.from_pretrained_config is None) or \
           (args.from_pretrained_weights is not None and args.from_pretrained_config is not None)

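As a quick sanity check on the batching flags parsed above, the number of sequences contributing to one optimizer update is the per-process batch size times the accumulation steps times the number of processes. A toy calculation with the defaults (the process count is purely illustrative):

```
batch_size = 5        # --batch_size default, per process
grad_accum = 50       # --gradient_accumulation_steps default
n_processes = 4       # e.g. 4 GPUs, one process each (illustrative)
print(batch_size * grad_accum * n_processes)   # 1000 sequences per update
```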
    ### TOKENIZER ###
    if args.teacher_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.teacher_name)
    elif args.teacher_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids

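For the default `bert-base-uncased` teacher, the mapping built above typically comes out as follows (values shown for illustration only; the exact keys are whatever `tokenizer.special_tokens_map` contains):

```
# e.g. with bert-base-uncased
special_tok_ids = {'pad_token': 0, 'unk_token': 100, 'cls_token': 101,
                   'sep_token': 102, 'mask_token': 103}
```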
    ## DATA LOADER ##
    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)


    assert os.path.isfile(args.token_counts)
    logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)')
    with open(args.token_counts, 'rb') as fp:
        counts = pickle.load(fp)
        assert len(counts) == args.vocab_size
    token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
    for idx in special_tok_ids.values():
        token_probs[idx] = 0.  # do not predict special tokens
    token_probs = torch.from_numpy(token_probs)


    train_dataloader = Dataset(params=args, data=data)
    logger.info('Data loader created.')

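The negative exponent `-args.mlm_smoothing` turns raw token counts into masking weights that emphasize rare tokens, as the `--mlm_smoothing` help string says. A toy illustration of the `token_probs` line above (not part of the diff):

```
import numpy as np

counts = np.array([10, 1000])            # toy counts: a rare and a frequent token
weights = np.maximum(counts, 1) ** -0.7  # --mlm_smoothing default
print(weights / weights.sum())           # ~[0.96, 0.04]: per occurrence, the rare token
                                         # is ~25x more likely to be picked for masking
```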
    ## STUDENT ##
    if args.from_pretrained_weights is not None:
        assert os.path.isfile(args.from_pretrained_weights)
        assert os.path.isfile(args.from_pretrained_config)
        logger.info(f'Loading pretrained weights from {args.from_pretrained_weights}')
        logger.info(f'Loading pretrained config from {args.from_pretrained_config}')
        stu_architecture_config = DistilBertConfig.from_json_file(args.from_pretrained_config)
        stu_architecture_config.output_hidden_states = True
        student = DistilBertForMaskedLM.from_pretrained(args.from_pretrained_weights,
                                                        config=stu_architecture_config)
    else:
        args.vocab_size_or_config_json_file = args.vocab_size
        stu_architecture_config = DistilBertConfig(**vars(args), output_hidden_states=True)
        student = DistilBertForMaskedLM(stu_architecture_config)


    if args.n_gpu > 0:
        student.to(f'cuda:{args.local_rank}')
    logger.info('Student loaded.')

    ## TEACHER ##
    if args.teacher_type == 'bert':
        teacher = BertForMaskedLM.from_pretrained(args.teacher_name, output_hidden_states=True)
    elif args.teacher_type == 'roberta':
        teacher = RobertaForMaskedLM.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f'cuda:{args.local_rank}')
    logger.info(f'Teacher loaded from {args.teacher_name}.')

    ## DISTILLER ##
    torch.cuda.empty_cache()
    distiller = Distiller(params=args,
                          dataloader=train_dataloader,
                          token_probs=token_probs,
                          student=student,
                          teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
129  examples/distillation/utils.py  Normal file
@ -0,0 +1,129 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils to train DistilBERT
    adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import git
import json
import os
import socket
import torch
import numpy as np

import logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d -  %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch)
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)

def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method='env://',
            backend='nccl',
        )

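When `params.n_gpu > 1`, `init_gpu_params` reads the job topology entirely from environment variables, so whatever launcher spawns one process per GPU has to export them first. A minimal sketch of that environment for a single node with two GPUs (the values are illustrative):

```
import os

# One process per GPU; only RANK (and --local_rank) differ between the two processes.
os.environ.update({
    'WORLD_SIZE': '2',    # total number of processes across all nodes
    'N_GPU_NODE': '2',    # processes (GPUs) on this node
    'N_NODES': '1',       # number of nodes in the job
    'NODE_RANK': '0',     # index of this node
    'RANK': '0',          # global rank of this process (the other process uses '1')
})
```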
def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
@ -1,297 +0,0 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""Extract pre-computed feature vectors from a PyTorch BERT model."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import
 | 
			
		||||
from __future__ import division
 | 
			
		||||
from __future__ import print_function
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import collections
 | 
			
		||||
import logging
 | 
			
		||||
import json
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
from pytorch_pretrained_bert.modeling import BertModel
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s', 
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level = logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputExample(object):
 | 
			
		||||
 | 
			
		||||
    def __init__(self, unique_id, text_a, text_b):
 | 
			
		||||
        self.unique_id = unique_id
 | 
			
		||||
        self.text_a = text_a
 | 
			
		||||
        self.text_b = text_b
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputFeatures(object):
 | 
			
		||||
    """A single set of features of data."""
 | 
			
		||||
 | 
			
		||||
    def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
 | 
			
		||||
        self.unique_id = unique_id
 | 
			
		||||
        self.tokens = tokens
 | 
			
		||||
        self.input_ids = input_ids
 | 
			
		||||
        self.input_mask = input_mask
 | 
			
		||||
        self.input_type_ids = input_type_ids
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_examples_to_features(examples, seq_length, tokenizer):
 | 
			
		||||
    """Loads a data file into a list of `InputFeature`s."""
 | 
			
		||||
 | 
			
		||||
    features = []
 | 
			
		||||
    for (ex_index, example) in enumerate(examples):
 | 
			
		||||
        tokens_a = tokenizer.tokenize(example.text_a)
 | 
			
		||||
 | 
			
		||||
        tokens_b = None
 | 
			
		||||
        if example.text_b:
 | 
			
		||||
            tokens_b = tokenizer.tokenize(example.text_b)
 | 
			
		||||
 | 
			
		||||
        if tokens_b:
 | 
			
		||||
            # Modifies `tokens_a` and `tokens_b` in place so that the total
 | 
			
		||||
            # length is less than the specified length.
 | 
			
		||||
            # Account for [CLS], [SEP], [SEP] with "- 3"
 | 
			
		||||
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
 | 
			
		||||
        else:
 | 
			
		||||
            # Account for [CLS] and [SEP] with "- 2"
 | 
			
		||||
            if len(tokens_a) > seq_length - 2:
 | 
			
		||||
                tokens_a = tokens_a[0:(seq_length - 2)]
 | 
			
		||||
 | 
			
		||||
        # The convention in BERT is:
 | 
			
		||||
        # (a) For sequence pairs:
 | 
			
		||||
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
 | 
			
		||||
        #  type_ids:   0   0  0    0    0     0      0   0    1  1  1   1  1   1
 | 
			
		||||
        # (b) For single sequences:
 | 
			
		||||
        #  tokens:   [CLS] the dog is hairy . [SEP]
 | 
			
		||||
        #  type_ids:   0   0   0   0  0     0   0
 | 
			
		||||
        #
 | 
			
		||||
        # Where "type_ids" are used to indicate whether this is the first
 | 
			
		||||
        # sequence or the second sequence. The embedding vectors for `type=0` and
 | 
			
		||||
        # `type=1` were learned during pre-training and are added to the wordpiece
 | 
			
		||||
        # embedding vector (and position vector). This is not *strictly* necessary
 | 
			
		||||
        # since the [SEP] token unambiguously separates the sequences, but it makes
 | 
			
		||||
        # it easier for the model to learn the concept of sequences.
 | 
			
		||||
        #
 | 
			
		||||
        # For classification tasks, the first vector (corresponding to [CLS]) is
 | 
			
		||||
        # used as the "sentence vector". Note that this only makes sense because
 | 
			
		||||
        # the entire model is fine-tuned.
 | 
			
		||||
        tokens = []
 | 
			
		||||
        input_type_ids = []
 | 
			
		||||
        tokens.append("[CLS]")
 | 
			
		||||
        input_type_ids.append(0)
 | 
			
		||||
        for token in tokens_a:
 | 
			
		||||
            tokens.append(token)
 | 
			
		||||
            input_type_ids.append(0)
 | 
			
		||||
        tokens.append("[SEP]")
 | 
			
		||||
        input_type_ids.append(0)
 | 
			
		||||
 | 
			
		||||
        if tokens_b:
 | 
			
		||||
            for token in tokens_b:
 | 
			
		||||
                tokens.append(token)
 | 
			
		||||
                input_type_ids.append(1)
 | 
			
		||||
            tokens.append("[SEP]")
 | 
			
		||||
            input_type_ids.append(1)
 | 
			
		||||
 | 
			
		||||
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
 | 
			
		||||
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
 | 
			
		||||
        # tokens are attended to.
 | 
			
		||||
        input_mask = [1] * len(input_ids)
 | 
			
		||||
 | 
			
		||||
        # Zero-pad up to the sequence length.
 | 
			
		||||
        while len(input_ids) < seq_length:
 | 
			
		||||
            input_ids.append(0)
 | 
			
		||||
            input_mask.append(0)
 | 
			
		||||
            input_type_ids.append(0)
 | 
			
		||||
 | 
			
		||||
        assert len(input_ids) == seq_length
 | 
			
		||||
        assert len(input_mask) == seq_length
 | 
			
		||||
        assert len(input_type_ids) == seq_length
 | 
			
		||||
 | 
			
		||||
        if ex_index < 5:
 | 
			
		||||
            logger.info("*** Example ***")
 | 
			
		||||
            logger.info("unique_id: %s" % (example.unique_id))
 | 
			
		||||
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
 | 
			
		||||
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
 | 
			
		||||
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
 | 
			
		||||
 | 
			
		||||
        features.append(
 | 
			
		||||
            InputFeatures(
 | 
			
		||||
                unique_id=example.unique_id,
 | 
			
		||||
                tokens=tokens,
 | 
			
		||||
                input_ids=input_ids,
 | 
			
		||||
                input_mask=input_mask,
 | 
			
		||||
                input_type_ids=input_type_ids))
 | 
			
		||||
    return features
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
    """Truncates a sequence pair in place to the maximum length."""
 | 
			
		||||
 | 
			
		||||
    # This is a simple heuristic which will always truncate the longer sequence
 | 
			
		||||
    # one token at a time. This makes more sense than truncating an equal percent
 | 
			
		||||
    # of tokens from each, since if one sequence is very short then each token
 | 
			
		||||
    # that's truncated likely contains more information than a longer sequence.
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_length:
 | 
			
		||||
            break
 | 
			
		||||
        if len(tokens_a) > len(tokens_b):
 | 
			
		||||
            tokens_a.pop()
 | 
			
		||||
        else:
 | 
			
		||||
            tokens_b.pop()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def read_examples(input_file):
 | 
			
		||||
    """Read a list of `InputExample`s from an input file."""
 | 
			
		||||
    examples = []
 | 
			
		||||
    unique_id = 0
 | 
			
		||||
    with open(input_file, "r", encoding='utf-8') as reader:
 | 
			
		||||
        while True:
 | 
			
		||||
            line = reader.readline()
 | 
			
		||||
            if not line:
 | 
			
		||||
                break
 | 
			
		||||
            line = line.strip()
 | 
			
		||||
            text_a = None
 | 
			
		||||
            text_b = None
 | 
			
		||||
            m = re.match(r"^(.*) \|\|\| (.*)$", line)
 | 
			
		||||
            if m is None:
 | 
			
		||||
                text_a = line
 | 
			
		||||
            else:
 | 
			
		||||
                text_a = m.group(1)
 | 
			
		||||
                text_b = m.group(2)
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
 | 
			
		||||
            unique_id += 1
 | 
			
		||||
    return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--input_file", default=None, type=str, required=True)
 | 
			
		||||
    parser.add_argument("--output_file", default=None, type=str, required=True)
 | 
			
		||||
    parser.add_argument("--bert_model", default=None, type=str, required=True,
 | 
			
		||||
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
 | 
			
		||||
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
 | 
			
		||||
 | 
			
		||||
    ## Other parameters
 | 
			
		||||
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
 | 
			
		||||
    parser.add_argument("--layers", default="-1,-2,-3,-4", type=str)
 | 
			
		||||
    parser.add_argument("--max_seq_length", default=128, type=int,
 | 
			
		||||
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences longer "
 | 
			
		||||
                            "than this will be truncated, and sequences shorter than this will be padded.")
 | 
			
		||||
    parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.")
 | 
			
		||||
    parser.add_argument("--local_rank",
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=-1,
 | 
			
		||||
                        help = "local_rank for distributed training on gpus")
 | 
			
		||||
    parser.add_argument("--no_cuda",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:
 | 
			
		||||
        device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        n_gpu = 1
 | 
			
		||||
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
    logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1)))
 | 
			
		||||
 | 
			
		||||
    layer_indexes = [int(x) for x in args.layers.split(",")]
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    examples = read_examples(args.input_file)
 | 
			
		||||
 | 
			
		||||
    features = convert_examples_to_features(
 | 
			
		||||
        examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer)
 | 
			
		||||
 | 
			
		||||
    unique_id_to_feature = {}
 | 
			
		||||
    for feature in features:
 | 
			
		||||
        unique_id_to_feature[feature.unique_id] = feature
 | 
			
		||||
 | 
			
		||||
    model = BertModel.from_pretrained(args.bert_model)
 | 
			
		||||
    model.to(device)
 | 
			
		||||
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
 | 
			
		||||
                                                          output_device=args.local_rank)
 | 
			
		||||
    elif n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
 | 
			
		||||
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
 | 
			
		||||
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
 | 
			
		||||
 | 
			
		||||
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
 | 
			
		||||
    if args.local_rank == -1:
 | 
			
		||||
        eval_sampler = SequentialSampler(eval_data)
 | 
			
		||||
    else:
 | 
			
		||||
        eval_sampler = DistributedSampler(eval_data)
 | 
			
		||||
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
 | 
			
		||||
 | 
			
		||||
    model.eval()
 | 
			
		||||
    with open(args.output_file, "w", encoding='utf-8') as writer:
 | 
			
		||||
        for input_ids, input_mask, example_indices in eval_dataloader:
 | 
			
		||||
            input_ids = input_ids.to(device)
 | 
			
		||||
            input_mask = input_mask.to(device)
 | 
			
		||||
 | 
			
		||||
            all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask)
 | 
			
		||||
            all_encoder_layers = all_encoder_layers
 | 
			
		||||
 | 
			
		||||
            for b, example_index in enumerate(example_indices):
 | 
			
		||||
                feature = features[example_index.item()]
 | 
			
		||||
                unique_id = int(feature.unique_id)
 | 
			
		||||
                # feature = unique_id_to_feature[unique_id]
 | 
			
		||||
                output_json = collections.OrderedDict()
 | 
			
		||||
                output_json["linex_index"] = unique_id
 | 
			
		||||
                all_out_features = []
 | 
			
		||||
                for (i, token) in enumerate(feature.tokens):
 | 
			
		||||
                    all_layers = []
 | 
			
		||||
                    for (j, layer_index) in enumerate(layer_indexes):
 | 
			
		||||
                        layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
 | 
			
		||||
                        layer_output = layer_output[b]
 | 
			
		||||
                        layers = collections.OrderedDict()
 | 
			
		||||
                        layers["index"] = layer_index
 | 
			
		||||
                        layers["values"] = [
 | 
			
		||||
                            round(x.item(), 6) for x in layer_output[i]
 | 
			
		||||
                        ]
 | 
			
		||||
                        all_layers.append(layers)
 | 
			
		||||
                    out_features = collections.OrderedDict()
 | 
			
		||||
                    out_features["token"] = token
 | 
			
		||||
                    out_features["layers"] = all_layers
 | 
			
		||||
                    all_out_features.append(out_features)
 | 
			
		||||
                output_json["features"] = all_out_features
 | 
			
		||||
                writer.write(json.dumps(output_json) + "\n")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    main()
 | 
			
		||||
@ -1,64 +0,0 @@
 | 
			
		||||
# BERT Model Finetuning using Masked Language Modeling objective
 | 
			
		||||
 | 
			
		||||
## Introduction
 | 
			
		||||
 | 
			
		||||
The three example scripts in this folder can be used to **fine-tune** a pre-trained BERT model using the pretraining objective (combination of masked language modeling and next sentence prediction loss). In general, pretrained models like BERT are first trained with a pretraining objective (masked language modeling and next sentence prediction for BERT) on a large and general natural language corpus. A classifier head is then added on top of the pre-trained architecture and the model is quickly fine-tuned on a target task, while still (hopefully) retaining its general language understanding. This greatly reduces overfitting and yields state-of-the-art results, especially when training data for the target task are limited.
 | 
			
		||||
 | 
			
		||||
The [ULMFiT paper](https://arxiv.org/abs/1801.06146) took a slightly different approach, however, and added an intermediate step in which the model is fine-tuned on text **from the same domain as the target task and using the pretraining objective** before the final stage in which the classifier head is added and the model is trained on the target task itself. This paper reported significantly improved results from this step, and found that they could get high-quality classifications even with only tiny numbers (<1000) of labelled training examples, as long as they had a lot of unlabelled data from the target domain.
 | 
			
		||||
 | 
			
		||||
The BERT model has more capacity than the LSTM models used in the ULMFiT work, but the [BERT paper](https://arxiv.org/abs/1810.04805) did not test finetuning using the pretraining objective and at the present stage there aren't many examples of this approach being used for Transformer-based language models. As such, it's hard to predict what effect this step will have on final model performance, but it's reasonable to conjecture that this approach can improve the final classification performance, especially when a large unlabelled corpus from the target domain is available, labelled data is limited, or the target domain is very unusual and different from 'normal' English text. If you are aware of any literature on this subject, please feel free to add it in here, or open an issue and tag me (@Rocketknight1) and I'll include it.
 | 
			
		||||
 | 
			
		||||
## Input format
 | 
			
		||||
 | 
			
		||||
The scripts in this folder expect a single file as input, consisting of untokenized text, with one **sentence** per line, and one blank line between documents. The reason for the sentence splitting is that part of BERT's training involves a _next sentence_ objective in which the model must predict whether two sequences of text are contiguous text from the same document or not, and to avoid making the task _too easy_, the split point between the sequences is always at the end of a sentence. The linebreaks in the file are therefore necessary to mark the points where the text can be split.
 | 
			
		||||
 | 
			
		||||
## Usage
 | 
			
		||||
 | 
			
		||||
There are two ways to fine-tune a language model using these scripts. The first _quick_ approach is to use [`simple_lm_finetuning.py`](./simple_lm_finetuning.py). This script does everything in a single script, but generates training instances that consist of just two sentences. This is quite different from the BERT paper, where (confusingly) the NextSentence task concatenated sentences together from each document to form two long multi-sentences, which the paper just referred to as _sentences_. The difference between this simple approach and the original paper approach can have a significant effect for long sequences since two sentences will be much shorter than the max sequence length. In this case, most of each training example will just consist of blank padding characters, which wastes a lot of computation and results in a model that isn't really training on long sequences.
 | 
			
		||||
 | 
			
		||||
As such, the preferred approach (assuming you have documents containing multiple contiguous sentences from your target domain) is to use [`pregenerate_training_data.py`](./pregenerate_training_data.py) to pre-process your data into training examples following the methodology used for LM training in the original BERT paper and repository. Since there is a significant random component to training data generation for BERT, this script includes an option to generate multiple _epochs_ of pre-processed data, to avoid training on the same random splits each epoch. Generating an epoch of data for each training epoch should result in a better final model, and so we recommend doing so.
 | 
			
		||||
 | 
			
		||||
You can then train on the pregenerated data using [`finetune_on_pregenerated.py`](./finetune_on_pregenerated.py), and pointing it to the folder created by [`pregenerate_training_data.py`](./pregenerate_training_data.py). Note that you should use the same `bert_model` and case options for both! Also note that `max_seq_len` does not need to be specified for the [`finetune_on_pregenerated.py`](./finetune_on_pregenerated.py) script, as it is inferred from the training examples.
 | 
			
		||||
 | 
			
		||||
There are various options that can be tweaked, but they are mostly set to the values from the BERT paper/repository and default values should make sense. The most relevant ones are:
 | 
			
		||||
 | 
			
		||||
- `--max_seq_len`: Controls the length of training examples (in wordpiece tokens) seen by the model. Defaults to 128 but can be set as high as 512. Higher values may yield stronger language models at the cost of slower and more memory-intensive training.
 | 
			
		||||
- `--fp16`: Enables fast half-precision training on recent GPUs.
 | 
			
		||||
 | 
			
		||||
In addition, if memory usage is an issue, especially when training on a single GPU, reducing `--train_batch_size` from the default 32 to a lower number (4-16) can be helpful, or leaving `--train_batch_size` at the default and increasing `--gradient_accumulation_steps` to 2-8. Changing `--gradient_accumulation_steps` may be preferable as alterations to the batch size may require corresponding changes in the learning rate to compensate. There is also a `--reduce_memory` option for both the `pregenerate_training_data.py` and `finetune_on_pregenerated.py` scripts that spills data to disc in shelf objects or numpy memmaps rather than retaining it in memory, which significantly reduces memory usage with little performance impact.
 | 
			
		||||
 | 
			
		||||
## Examples
 | 
			
		||||
 | 
			
		||||
### Simple fine-tuning
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
python3 simple_lm_finetuning.py 
 | 
			
		||||
--train_corpus my_corpus.txt 
 | 
			
		||||
--bert_model bert-base-uncased 
 | 
			
		||||
--do_lower_case 
 | 
			
		||||
--output_dir finetuned_lm/
 | 
			
		||||
--do_train
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Pregenerating training data
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
python3 pregenerate_training_data.py
 | 
			
		||||
--train_corpus my_corpus.txt
 | 
			
		||||
--bert_model bert-base-uncased
 | 
			
		||||
--do_lower_case
 | 
			
		||||
--output_dir training/
 | 
			
		||||
--epochs_to_generate 3
 | 
			
		||||
--max_seq_len 256
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Training on pregenerated data
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
python3 finetune_on_pregenerated.py
 | 
			
		||||
--pregenerated_data training/
 | 
			
		||||
--bert_model bert-base-uncased
 | 
			
		||||
--do_lower_case
 | 
			
		||||
--output_dir finetuned_lm/
 | 
			
		||||
--epochs 3
 | 
			
		||||
```
 | 
			
		||||
@ -1,334 +0,0 @@
 | 
			
		||||
from argparse import ArgumentParser
 | 
			
		||||
from pathlib import Path
 | 
			
		||||
import torch
 | 
			
		||||
import logging
 | 
			
		||||
import json
 | 
			
		||||
import random
 | 
			
		||||
import numpy as np
 | 
			
		||||
from collections import namedtuple
 | 
			
		||||
from tempfile import TemporaryDirectory
 | 
			
		||||
 | 
			
		||||
from torch.utils.data import DataLoader, Dataset, RandomSampler
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
from tqdm import tqdm
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert.modeling import BertForPreTraining
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
 | 
			
		||||
 | 
			
		||||
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")
 | 
			
		||||
 | 
			
		||||
log_format = '%(asctime)-10s: %(message)s'
 | 
			
		||||
logging.basicConfig(level=logging.INFO, format=log_format)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_example_to_features(example, tokenizer, max_seq_length):
 | 
			
		||||
    tokens = example["tokens"]
 | 
			
		||||
    segment_ids = example["segment_ids"]
 | 
			
		||||
    is_random_next = example["is_random_next"]
 | 
			
		||||
    masked_lm_positions = example["masked_lm_positions"]
 | 
			
		||||
    masked_lm_labels = example["masked_lm_labels"]
 | 
			
		||||
 | 
			
		||||
    assert len(tokens) == len(segment_ids) <= max_seq_length  # The preprocessed data should be already truncated
 | 
			
		||||
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
    masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)
 | 
			
		||||
 | 
			
		||||
    input_array = np.zeros(max_seq_length, dtype=np.int)
 | 
			
		||||
    input_array[:len(input_ids)] = input_ids
 | 
			
		||||
 | 
			
		||||
    mask_array = np.zeros(max_seq_length, dtype=np.bool)
 | 
			
		||||
    mask_array[:len(input_ids)] = 1
 | 
			
		||||
 | 
			
		||||
    segment_array = np.zeros(max_seq_length, dtype=np.bool)
 | 
			
		||||
    segment_array[:len(segment_ids)] = segment_ids
 | 
			
		||||
 | 
			
		||||
    lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
 | 
			
		||||
    lm_label_array[masked_lm_positions] = masked_label_ids
 | 
			
		||||
 | 
			
		||||
    features = InputFeatures(input_ids=input_array,
 | 
			
		||||
                             input_mask=mask_array,
 | 
			
		||||
                             segment_ids=segment_array,
 | 
			
		||||
                             lm_label_ids=lm_label_array,
 | 
			
		||||
                             is_next=is_random_next)
 | 
			
		||||
    return features
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class PregeneratedDataset(Dataset):
 | 
			
		||||
    def __init__(self, training_path, epoch, tokenizer, num_data_epochs, reduce_memory=False):
 | 
			
		||||
        self.vocab = tokenizer.vocab
 | 
			
		||||
        self.tokenizer = tokenizer
 | 
			
		||||
        self.epoch = epoch
 | 
			
		||||
        self.data_epoch = epoch % num_data_epochs
 | 
			
		||||
        data_file = training_path / f"epoch_{self.data_epoch}.json"
 | 
			
		||||
        metrics_file = training_path / f"epoch_{self.data_epoch}_metrics.json"
 | 
			
		||||
        assert data_file.is_file() and metrics_file.is_file()
 | 
			
		||||
        metrics = json.loads(metrics_file.read_text())
 | 
			
		||||
        num_samples = metrics['num_training_examples']
 | 
			
		||||
        seq_len = metrics['max_seq_len']
 | 
			
		||||
        self.temp_dir = None
 | 
			
		||||
        self.working_dir = None
 | 
			
		||||
        if reduce_memory:
 | 
			
		||||
            self.temp_dir = TemporaryDirectory()
 | 
			
		||||
            self.working_dir = Path(self.temp_dir.name)
 | 
			
		||||
            input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
 | 
			
		||||
                                  mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
 | 
			
		||||
            input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
 | 
			
		||||
                                    shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
 | 
			
		||||
            segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
 | 
			
		||||
                                    shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
 | 
			
		||||
            lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
 | 
			
		||||
                                     shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
 | 
			
		||||
            lm_label_ids[:] = -1
 | 
			
		||||
            is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap',
 | 
			
		||||
                                 shape=(num_samples,), mode='w+', dtype=np.bool)
 | 
			
		||||
        else:
 | 
			
		||||
            input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
 | 
			
		||||
            input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
 | 
			
		||||
            segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
 | 
			
		||||
            lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
 | 
			
		||||
            is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool)
 | 
			
		||||
        logging.info(f"Loading training examples for epoch {epoch}")
 | 
			
		||||
        with data_file.open() as f:
 | 
			
		||||
            for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")):
 | 
			
		||||
                line = line.strip()
 | 
			
		||||
                example = json.loads(line)
 | 
			
		||||
                features = convert_example_to_features(example, tokenizer, seq_len)
 | 
			
		||||
                input_ids[i] = features.input_ids
 | 
			
		||||
                segment_ids[i] = features.segment_ids
 | 
			
		||||
                input_masks[i] = features.input_mask
 | 
			
		||||
                lm_label_ids[i] = features.lm_label_ids
 | 
			
		||||
                is_nexts[i] = features.is_next
 | 
			
		||||
        assert i == num_samples - 1  # Assert that the sample count metric was true
 | 
			
		||||
        logging.info("Loading complete!")
 | 
			
		||||
        self.num_samples = num_samples
 | 
			
		||||
        self.seq_len = seq_len
 | 
			
		||||
        self.input_ids = input_ids
 | 
			
		||||
        self.input_masks = input_masks
 | 
			
		||||
        self.segment_ids = segment_ids
 | 
			
		||||
        self.lm_label_ids = lm_label_ids
 | 
			
		||||
        self.is_nexts = is_nexts
 | 
			
		||||
 | 
			
		||||
    def __len__(self):
 | 
			
		||||
        return self.num_samples
 | 
			
		||||
 | 
			
		||||
    def __getitem__(self, item):
 | 
			
		||||
        return (torch.tensor(self.input_ids[item].astype(np.int64)),
 | 
			
		||||
                torch.tensor(self.input_masks[item].astype(np.int64)),
 | 
			
		||||
                torch.tensor(self.segment_ids[item].astype(np.int64)),
 | 
			
		||||
                torch.tensor(self.lm_label_ids[item].astype(np.int64)),
 | 
			
		||||
                torch.tensor(self.is_nexts[item].astype(np.int64)))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = ArgumentParser()
 | 
			
		||||
    parser.add_argument('--pregenerated_data', type=Path, required=True)
 | 
			
		||||
    parser.add_argument('--output_dir', type=Path, required=True)
 | 
			
		||||
    parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
 | 
			
		||||
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
 | 
			
		||||
    parser.add_argument("--do_lower_case", action="store_true")
 | 
			
		||||
    parser.add_argument("--reduce_memory", action="store_true",
 | 
			
		||||
                        help="Store training data as on-disc memmaps to massively reduce memory usage")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for")
 | 
			
		||||
    parser.add_argument("--local_rank",
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=-1,
 | 
			
		||||
                        help="local_rank for distributed training on gpus")
 | 
			
		||||
    parser.add_argument("--no_cuda",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument('--gradient_accumulation_steps',
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=1,
 | 
			
		||||
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument("--train_batch_size",
 | 
			
		||||
                        default=32,
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        help="Total batch size for training.")
 | 
			
		||||
    parser.add_argument('--fp16',
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to use 16-bit float precision instead of 32-bit")
 | 
			
		||||
    parser.add_argument('--loss_scale',
 | 
			
		||||
                        type=float, default=0,
 | 
			
		||||
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
 | 
			
		||||
                        "0 (default value): dynamic loss scaling.\n"
 | 
			
		||||
                        "Positive power of 2: static loss scaling value.\n")
 | 
			
		||||
    parser.add_argument("--warmup_proportion",
 | 
			
		||||
                        default=0.1,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="Proportion of training to perform linear learning rate warmup for. "
 | 
			
		||||
                             "E.g., 0.1 = 10%% of training.")
 | 
			
		||||
    parser.add_argument("--learning_rate",
 | 
			
		||||
                        default=3e-5,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="The initial learning rate for Adam.")
 | 
			
		||||
    parser.add_argument('--seed',
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=42,
 | 
			
		||||
                        help="random seed for initialization")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    assert args.pregenerated_data.is_dir(), \
 | 
			
		||||
        "--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"
 | 
			
		||||
 | 
			
		||||
    samples_per_epoch = []
 | 
			
		||||
    for i in range(args.epochs):
 | 
			
		||||
        epoch_file = args.pregenerated_data / f"epoch_{i}.json"
 | 
			
		||||
        metrics_file = args.pregenerated_data / f"epoch_{i}_metrics.json"
 | 
			
		||||
        if epoch_file.is_file() and metrics_file.is_file():
 | 
			
		||||
            metrics = json.loads(metrics_file.read_text())
 | 
			
		||||
            samples_per_epoch.append(metrics['num_training_examples'])
 | 
			
		||||
        else:
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                exit("No training data was found!")
 | 
			
		||||
            print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
 | 
			
		||||
            print("This script will loop over the available data, but training diversity may be negatively impacted.")
 | 
			
		||||
            num_data_epochs = i
 | 
			
		||||
            break
 | 
			
		||||
    else:
 | 
			
		||||
        num_data_epochs = args.epochs
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:
 | 
			
		||||
        torch.cuda.set_device(args.local_rank)
 | 
			
		||||
        device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        n_gpu = 1
 | 
			
		||||
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
    logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
 | 
			
		||||
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
 | 
			
		||||
 | 
			
		||||
    if args.gradient_accumulation_steps < 1:
 | 
			
		||||
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
 | 
			
		||||
                            args.gradient_accumulation_steps))
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
 | 
			
		||||
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
 | 
			
		||||
    args.output_dir.mkdir(parents=True, exist_ok=True)
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    total_train_examples = 0
 | 
			
		||||
    for i in range(args.epochs):
 | 
			
		||||
        # The modulo takes into account the fact that we may loop over limited epochs of data
 | 
			
		||||
        total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]
 | 
			
		||||
 | 
			
		||||
    num_train_optimization_steps = int(
 | 
			
		||||
        total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 | 
			
		||||
 | 
			
		||||
    # Prepare model
 | 
			
		||||
    model = BertForPreTraining.from_pretrained(args.bert_model)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        model.half()
 | 
			
		||||
    model.to(device)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.parallel import DistributedDataParallel as DDP
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError(
 | 
			
		||||
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
        model = DDP(model)
 | 
			
		||||
    elif n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer
 | 
			
		||||
    param_optimizer = list(model.named_parameters())
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
 | 
			
		||||
         'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.optimizers import FP16_Optimizer
 | 
			
		||||
            from apex.optimizers import FusedAdam
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError(
 | 
			
		||||
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
 | 
			
		||||
        optimizer = FusedAdam(optimizer_grouped_parameters,
 | 
			
		||||
                              lr=args.learning_rate,
 | 
			
		||||
                              bias_correction=False,
 | 
			
		||||
                              max_grad_norm=1.0)
 | 
			
		||||
        if args.loss_scale == 0:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
 | 
			
		||||
        else:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
 | 
			
		||||
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
 | 
			
		||||
                                             t_total=num_train_optimization_steps)
 | 
			
		||||
    else:
 | 
			
		||||
        optimizer = BertAdam(optimizer_grouped_parameters,
 | 
			
		||||
                             lr=args.learning_rate,
 | 
			
		||||
                             warmup=args.warmup_proportion,
 | 
			
		||||
                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    logging.info("***** Running training *****")
 | 
			
		||||
    logging.info(f"  Num examples = {total_train_examples}")
 | 
			
		||||
    logging.info("  Batch size = %d", args.train_batch_size)
 | 
			
		||||
    logging.info("  Num steps = %d", num_train_optimization_steps)
 | 
			
		||||
    model.train()
 | 
			
		||||
    for epoch in range(args.epochs):
 | 
			
		||||
        epoch_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data, tokenizer=tokenizer,
 | 
			
		||||
                                            num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory)
 | 
			
		||||
        if args.local_rank == -1:
 | 
			
		||||
            train_sampler = RandomSampler(epoch_dataset)
 | 
			
		||||
        else:
 | 
			
		||||
            train_sampler = DistributedSampler(epoch_dataset)
 | 
			
		||||
        train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
 | 
			
		||||
        tr_loss = 0
 | 
			
		||||
        nb_tr_examples, nb_tr_steps = 0, 0
 | 
			
		||||
        with tqdm(total=len(train_dataloader), desc=f"Epoch {epoch}") as pbar:
 | 
			
		||||
            for step, batch in enumerate(train_dataloader):
 | 
			
		||||
                batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
 | 
			
		||||
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
 | 
			
		||||
                if n_gpu > 1:
 | 
			
		||||
                    loss = loss.mean() # mean() to average on multi-gpu.
 | 
			
		||||
                if args.gradient_accumulation_steps > 1:
 | 
			
		||||
                    loss = loss / args.gradient_accumulation_steps
 | 
			
		||||
                if args.fp16:
 | 
			
		||||
                    optimizer.backward(loss)
 | 
			
		||||
                else:
 | 
			
		||||
                    loss.backward()
 | 
			
		||||
                tr_loss += loss.item()
 | 
			
		||||
                nb_tr_examples += input_ids.size(0)
 | 
			
		||||
                nb_tr_steps += 1
 | 
			
		||||
                pbar.update(1)
 | 
			
		||||
                mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
 | 
			
		||||
                pbar.set_postfix_str(f"Loss: {mean_loss:.5f}")
 | 
			
		||||
                if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                    if args.fp16:
 | 
			
		||||
                        # modify learning rate with special warm up BERT uses
 | 
			
		||||
                        # if args.fp16 is False, BertAdam is used that handles this automatically
 | 
			
		||||
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
 | 
			
		||||
                                                                                 args.warmup_proportion)
 | 
			
		||||
                        for param_group in optimizer.param_groups:
 | 
			
		||||
                            param_group['lr'] = lr_this_step
 | 
			
		||||
                    optimizer.step()
 | 
			
		||||
                    optimizer.zero_grad()
 | 
			
		||||
                    global_step += 1
 | 
			
		||||
 | 
			
		||||
    # Save a trained model
 | 
			
		||||
    logging.info("** ** * Saving fine-tuned model ** ** * ")
 | 
			
		||||
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
 | 
			
		||||
    output_model_file = args.output_dir / "pytorch_model.bin"
 | 
			
		||||
    torch.save(model_to_save.state_dict(), str(output_model_file))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    main()
 | 
			
		||||
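
Editor's note: the loop above divides the requested train_batch_size by gradient_accumulation_steps and only calls optimizer.step() once every gradient_accumulation_steps micro-batches, so each parameter update still covers the originally requested number of examples. A minimal standalone sketch of that bookkeeping (the numbers here are illustrative, not taken from the script):

requested_batch_size = 32        # what the user passes as --train_batch_size
accumulation_steps = 4           # --gradient_accumulation_steps
micro_batch_size = requested_batch_size // accumulation_steps  # 8, as computed above

for step in range(8):            # stand-in for iterating the DataLoader
    # the real loop computes loss / accumulation_steps and calls backward() here
    if (step + 1) % accumulation_steps == 0:
        print(f"step {step}: optimizer.step() covers "
              f"{micro_batch_size * accumulation_steps} examples")  # 32 each time
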
@@ -1,302 +0,0 @@
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm, trange
from tempfile import TemporaryDirectory
import shelve

from random import random, randrange, randint, shuffle, choice, sample
from pytorch_pretrained_bert.tokenization import BertTokenizer
import numpy as np
import json


class DocumentDatabase:
    def __init__(self, reduce_memory=False):
        if reduce_memory:
            self.temp_dir = TemporaryDirectory()
            self.working_dir = Path(self.temp_dir.name)
            self.document_shelf_filepath = self.working_dir / 'shelf.db'
            self.document_shelf = shelve.open(str(self.document_shelf_filepath),
                                              flag='n', protocol=-1)
            self.documents = None
        else:
            self.documents = []
            self.document_shelf = None
            self.document_shelf_filepath = None
            self.temp_dir = None
        self.doc_lengths = []
        self.doc_cumsum = None
        self.cumsum_max = None
        self.reduce_memory = reduce_memory

    def add_document(self, document):
        if not document:
            return
        if self.reduce_memory:
            current_idx = len(self.doc_lengths)
            self.document_shelf[str(current_idx)] = document
        else:
            self.documents.append(document)
        self.doc_lengths.append(len(document))

    def _precalculate_doc_weights(self):
        self.doc_cumsum = np.cumsum(self.doc_lengths)
        self.cumsum_max = self.doc_cumsum[-1]

    def sample_doc(self, current_idx, sentence_weighted=True):
        # Uses the current iteration counter to ensure we don't sample the same doc twice
        if sentence_weighted:
            # With sentence weighting, we sample docs proportionally to their sentence length
            if self.doc_cumsum is None or len(self.doc_cumsum) != len(self.doc_lengths):
                self._precalculate_doc_weights()
            rand_start = self.doc_cumsum[current_idx]
            rand_end = rand_start + self.cumsum_max - self.doc_lengths[current_idx]
            sentence_index = randrange(rand_start, rand_end) % self.cumsum_max
            sampled_doc_index = np.searchsorted(self.doc_cumsum, sentence_index, side='right')
        else:
            # If we don't use sentence weighting, then every doc has an equal chance to be chosen
            sampled_doc_index = (current_idx + randrange(1, len(self.doc_lengths))) % len(self.doc_lengths)
        assert sampled_doc_index != current_idx
        if self.reduce_memory:
            return self.document_shelf[str(sampled_doc_index)]
        else:
            return self.documents[sampled_doc_index]

    def __len__(self):
        return len(self.doc_lengths)

    def __getitem__(self, item):
        if self.reduce_memory:
            return self.document_shelf[str(item)]
        else:
            return self.documents[item]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, traceback):
        if self.document_shelf is not None:
            self.document_shelf.close()
        if self.temp_dir is not None:
            self.temp_dir.cleanup()

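# Editor's note (worked example, not part of the original file): with doc_lengths [3, 5, 2],
# _precalculate_doc_weights gives doc_cumsum [3, 8, 10] and cumsum_max 10. For current_idx=1,
# sample_doc draws sentence_index from randrange(8, 13) % 10, i.e. from {8, 9, 0, 1, 2};
# np.searchsorted(doc_cumsum, sentence_index, side='right') then maps those to document 2 or
# document 0, so the current document is never returned and longer documents are picked in
# proportion to their sentence count.
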
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
    """Truncates a pair of sequences to a maximum sequence length. Lifted from Google's BERT repo."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_num_tokens:
            break

        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1

        # We want to sometimes truncate from the front and sometimes from the
        # back to add more randomness and avoid biases.
        if random() < 0.5:
            del trunc_tokens[0]
        else:
            trunc_tokens.pop()

def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
    """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
    with several refactors to clean it up and remove a lot of unnecessary variables."""
    cand_indices = []
    for (i, token) in enumerate(tokens):
        if token == "[CLS]" or token == "[SEP]":
            continue
        cand_indices.append(i)

    num_to_mask = min(max_predictions_per_seq,
                      max(1, int(round(len(tokens) * masked_lm_prob))))
    shuffle(cand_indices)
    mask_indices = sorted(sample(cand_indices, num_to_mask))
    masked_token_labels = []
    for index in mask_indices:
        # 80% of the time, replace with [MASK]
        if random() < 0.8:
            masked_token = "[MASK]"
        else:
            # 10% of the time, keep original
            if random() < 0.5:
                masked_token = tokens[index]
            # 10% of the time, replace with random word
            else:
                masked_token = choice(vocab_list)
        masked_token_labels.append(tokens[index])
        # Once we've saved the true label for that token, we can overwrite it with the masked version
        tokens[index] = masked_token

    return tokens, mask_indices, masked_token_labels

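# Editor's sketch (not part of the original file): calling the function above on a toy
# sequence. With 8 tokens and masked_lm_prob=0.15, num_to_mask = max(1, round(8 * 0.15)) = 1,
# so exactly one non-special token is chosen:
#
#   toks = ["[CLS]", "the", "cat", "sat", "on", "the", "mat", "[SEP]"]
#   toks, positions, labels = create_masked_lm_predictions(
#       toks, masked_lm_prob=0.15, max_predictions_per_seq=20,
#       vocab_list=["the", "cat", "sat", "on", "mat", "dog"])
#   # e.g. positions == [2] and labels == ["cat"]; toks[2] is now "[MASK]" (80% of the
#   # time), a random vocabulary word (10%), or left unchanged (10%).
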
def create_instances_from_document(
        doc_database, doc_idx, max_seq_length, short_seq_prob,
        masked_lm_prob, max_predictions_per_seq, vocab_list):
    """This code is mostly a duplicate of the equivalent function from Google BERT's repo.
    However, we make some changes and improvements. Sampling is improved and no longer requires a loop in this function.
    Also, documents are sampled proportionally to the number of sentences they contain, which means each sentence
    (rather than each document) has an equal chance of being sampled as a false example for the NextSentence task."""
    document = doc_database[doc_idx]
    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if random() < short_seq_prob:
        target_seq_length = randint(2, max_num_tokens)

    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while i < len(document):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = randrange(1, len(current_chunk))

                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])

                tokens_b = []

                # Random next
                if len(current_chunk) == 1 or random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)

                    # Sample a random document, with longer docs being sampled more frequently
                    random_document = doc_database.sample_doc(current_idx=doc_idx, sentence_weighted=True)

                    random_start = randrange(0, len(random_document))
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we "put them back" so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)

                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1

                tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
                # The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP]
                # They are 1 for the B tokens and the final [SEP]
                segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]

                tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions(
                    tokens, masked_lm_prob, max_predictions_per_seq, vocab_list)

                instance = {
                    "tokens": tokens,
                    "segment_ids": segment_ids,
                    "is_random_next": is_random_next,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_labels": masked_lm_labels}
                instances.append(instance)
            current_chunk = []
            current_length = 0
        i += 1

    return instances

def main():
    parser = ArgumentParser()
    parser.add_argument('--train_corpus', type=Path, required=True)
    parser.add_argument("--output_dir", type=Path, required=True)
    parser.add_argument("--bert_model", type=str, required=True,
                        choices=["bert-base-uncased", "bert-large-uncased", "bert-base-cased",
                                 "bert-base-multilingual", "bert-base-chinese"])
    parser.add_argument("--do_lower_case", action="store_true")

    parser.add_argument("--reduce_memory", action="store_true",
                        help="Reduce memory usage for large datasets by keeping data on disc rather than in memory")

    parser.add_argument("--epochs_to_generate", type=int, default=3,
                        help="Number of epochs of data to pregenerate")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--short_seq_prob", type=float, default=0.1,
                        help="Probability of making a short sentence as a training example")
    parser.add_argument("--masked_lm_prob", type=float, default=0.15,
                        help="Probability of masking each token for the LM task")
    parser.add_argument("--max_predictions_per_seq", type=int, default=20,
                        help="Maximum number of tokens to mask in each sequence")

    args = parser.parse_args()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    vocab_list = list(tokenizer.vocab.keys())
    with DocumentDatabase(reduce_memory=args.reduce_memory) as docs:
        with args.train_corpus.open() as f:
            doc = []
            for line in tqdm(f, desc="Loading Dataset", unit=" lines"):
                line = line.strip()
                if line == "":
                    docs.add_document(doc)
                    doc = []
                else:
                    tokens = tokenizer.tokenize(line)
                    doc.append(tokens)
            if doc:
                docs.add_document(doc)  # If the last doc didn't end on a newline, make sure it still gets added
        if len(docs) <= 1:
            exit("ERROR: No document breaks were found in the input file! These are necessary to allow the script to "
                 "ensure that random NextSentences are not sampled from the same document. Please add blank lines to "
                 "indicate breaks between documents in your input file. If your dataset does not contain multiple "
                 "documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, "
                 "sections or paragraphs.")

        args.output_dir.mkdir(exist_ok=True)
        for epoch in trange(args.epochs_to_generate, desc="Epoch"):
            epoch_filename = args.output_dir / f"epoch_{epoch}.json"
            num_instances = 0
            with epoch_filename.open('w') as epoch_file:
                for doc_idx in trange(len(docs), desc="Document"):
                    doc_instances = create_instances_from_document(
                        docs, doc_idx, max_seq_length=args.max_seq_len, short_seq_prob=args.short_seq_prob,
                        masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq,
                        vocab_list=vocab_list)
                    doc_instances = [json.dumps(instance) for instance in doc_instances]
                    for instance in doc_instances:
                        epoch_file.write(instance + '\n')
                        num_instances += 1
            metrics_file = args.output_dir / f"epoch_{epoch}_metrics.json"
            with metrics_file.open('w') as metrics_file:
                metrics = {
                    "num_training_examples": num_instances,
                    "max_seq_len": args.max_seq_len
                }
                metrics_file.write(json.dumps(metrics))


if __name__ == '__main__':
    main()
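
Editor's note: each epoch file written by main() above holds one JSON object per line, with exactly the keys assembled in create_instances_from_document, plus a small metrics file per epoch. A minimal sketch of reading one pregenerated instance back (the training_data/ path is only a placeholder for whatever was passed as --output_dir):

import json
from pathlib import Path

output_dir = Path("training_data")  # placeholder for the --output_dir used above
with (output_dir / "epoch_0.json").open() as f:
    instance = json.loads(next(f))
print(sorted(instance))  # ['is_random_next', 'masked_lm_labels', 'masked_lm_positions',
                         #  'segment_ids', 'tokens']

metrics = json.loads((output_dir / "epoch_0_metrics.json").read_text())
print(metrics)           # {'num_training_examples': ..., 'max_seq_len': ...}
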
@@ -1,645 +0,0 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import logging
import os
import random
from io import open

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from pytorch_pretrained_bert.modeling import BertForPreTraining
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

class BERTDataset(Dataset):
    def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
        self.vocab = tokenizer.vocab
        self.tokenizer = tokenizer
        self.seq_len = seq_len
        self.on_memory = on_memory
        self.corpus_lines = corpus_lines  # number of non-empty lines in input corpus
        self.corpus_path = corpus_path
        self.encoding = encoding
        self.current_doc = 0  # to avoid random sentence from same doc

        # for loading samples directly from file
        self.sample_counter = 0  # used to keep track of full epochs on file
        self.line_buffer = None  # keep second sentence of a pair in memory and use as first sentence in next pair

        # for loading samples in memory
        self.current_random_doc = 0
        self.num_docs = 0
        self.sample_to_doc = []  # map sample index to doc and line

        # load samples into memory
        if on_memory:
            self.all_docs = []
            doc = []
            self.corpus_lines = 0
            with open(corpus_path, "r", encoding=encoding) as f:
                for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
                    line = line.strip()
                    if line == "":
                        self.all_docs.append(doc)
                        doc = []
                        # remove last added sample because there won't be a subsequent line anymore in the doc
                        self.sample_to_doc.pop()
                    else:
                        # store as one sample
                        sample = {"doc_id": len(self.all_docs),
                                  "line": len(doc)}
                        self.sample_to_doc.append(sample)
                        doc.append(line)
                        self.corpus_lines = self.corpus_lines + 1

            # if last row in file is not empty
            if self.all_docs[-1] != doc:
                self.all_docs.append(doc)
                self.sample_to_doc.pop()

            self.num_docs = len(self.all_docs)

        # load samples later lazily from disk
        else:
            if self.corpus_lines is None:
                with open(corpus_path, "r", encoding=encoding) as f:
                    self.corpus_lines = 0
                    for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
                        if line.strip() == "":
                            self.num_docs += 1
                        else:
                            self.corpus_lines += 1

                    # if doc does not end with empty line
                    if line.strip() != "":
                        self.num_docs += 1

            self.file = open(corpus_path, "r", encoding=encoding)
            self.random_file = open(corpus_path, "r", encoding=encoding)

    def __len__(self):
        # last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
        return self.corpus_lines - self.num_docs - 1

    def __getitem__(self, item):
        cur_id = self.sample_counter
        self.sample_counter += 1
        if not self.on_memory:
            # after one epoch we start again from beginning of file
            if cur_id != 0 and (cur_id % len(self) == 0):
                self.file.close()
                self.file = open(self.corpus_path, "r", encoding=self.encoding)

        t1, t2, is_next_label = self.random_sent(item)

        # tokenize
        tokens_a = self.tokenizer.tokenize(t1)
        tokens_b = self.tokenizer.tokenize(t2)

        # combine to one sample
        cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label)

        # transform sample to features
        cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)

        cur_tensors = (torch.tensor(cur_features.input_ids),
                       torch.tensor(cur_features.input_mask),
                       torch.tensor(cur_features.segment_ids),
                       torch.tensor(cur_features.lm_label_ids),
                       torch.tensor(cur_features.is_next))

        return cur_tensors

    def random_sent(self, index):
        """
        Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
        from one doc. With 50% the second sentence will be a random one from another doc.
        :param index: int, index of sample.
        :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
        """
        t1, t2 = self.get_corpus_line(index)
        if random.random() > 0.5:
            label = 0
        else:
            t2 = self.get_random_line()
            label = 1

        assert len(t1) > 0
        assert len(t2) > 0
        return t1, t2, label

    def get_corpus_line(self, item):
        """
        Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
        :param item: int, index of sample.
        :return: (str, str), two subsequent sentences from corpus
        """
        t1 = ""
        t2 = ""
        assert item < self.corpus_lines
        if self.on_memory:
            sample = self.sample_to_doc[item]
            t1 = self.all_docs[sample["doc_id"]][sample["line"]]
            t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
            # used later to avoid random nextSentence from same doc
            self.current_doc = sample["doc_id"]
            return t1, t2
        else:
            if self.line_buffer is None:
                # read first non-empty line of file
                while t1 == "":
                    t1 = next(self.file).strip()
                    t2 = next(self.file).strip()
            else:
                # use t2 from previous iteration as new t1
                t1 = self.line_buffer
                t2 = next(self.file).strip()
                # skip empty rows that are used for separating documents and keep track of current doc id
                while t2 == "" or t1 == "":
                    t1 = next(self.file).strip()
                    t2 = next(self.file).strip()
                    self.current_doc = self.current_doc+1
            self.line_buffer = t2

        assert t1 != ""
        assert t2 != ""
        return t1, t2

    def get_random_line(self):
        """
        Get random line from another document for nextSentence task.
        :return: str, content of one line
        """
        # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
        # corpora. However, just to be careful, we try to make sure that
        # the random document is not the same as the document we're processing.
        for _ in range(10):
            if self.on_memory:
                rand_doc_idx = random.randint(0, len(self.all_docs)-1)
                rand_doc = self.all_docs[rand_doc_idx]
                line = rand_doc[random.randrange(len(rand_doc))]
            else:
                rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
                # pick random line
                for _ in range(rand_index):
                    line = self.get_next_line()
            # check if our picked random line is really from another doc like we want it to be
            if self.current_random_doc != self.current_doc:
                break
        return line

    def get_next_line(self):
        """ Gets next line of random_file and starts over when reaching end of file"""
        try:
            line = next(self.random_file).strip()
            # keep track of which document we are currently looking at to later avoid having the same doc as t1
            if line == "":
                self.current_random_doc = self.current_random_doc + 1
                line = next(self.random_file).strip()
        except StopIteration:
            self.random_file.close()
            self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
            line = next(self.random_file).strip()
        return line

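# Editor's note (usage sketch, not part of the original file): BERTDataset plugs directly
# into a torch DataLoader, exactly as main() does further down; each item is the 5-tuple of
# tensors built in __getitem__ (input_ids, input_mask, segment_ids, lm_label_ids, is_next):
#
#   dataset = BERTDataset("my_corpus.txt", tokenizer, seq_len=128, on_memory=True)
#   loader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=32)
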
class InputExample(object):
    """A single training/test example for the language model."""

    def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            tokens_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            tokens_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.tokens_a = tokens_a
        self.tokens_b = tokens_b
        self.is_next = is_next  # nextSentence
        self.lm_labels = lm_labels  # masked words for language model


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.is_next = is_next
        self.lm_label_ids = lm_label_ids

def random_word(tokens, tokenizer):
    """
    Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
    :param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
    :return: (list of str, list of int), masked tokens and related labels for LM prediction
    """
    output_label = []

    for i, token in enumerate(tokens):
        prob = random.random()
        # mask token with 15% probability
        if prob < 0.15:
            prob /= 0.15

            # 80% randomly change token to mask token
            if prob < 0.8:
                tokens[i] = "[MASK]"

            # 10% randomly change token to random token
            elif prob < 0.9:
                tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]

            # -> rest 10% randomly keep current token

            # append current token to output (we will predict these later)
            try:
                output_label.append(tokenizer.vocab[token])
            except KeyError:
                # For unknown words (should not occur with BPE vocab)
                output_label.append(tokenizer.vocab["[UNK]"])
                logger.warning("Cannot find token '{}' in vocab. Using [UNK] instead".format(token))
        else:
            # no masking token (will be ignored by loss function later)
            output_label.append(-1)

    return tokens, output_label

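# Editor's note: positions that random_word leaves unmasked receive the label -1 above, and
# convert_example_to_features below also pads lm_label_ids with -1. Per the "ignored by loss
# function" comment, the intent is that the LM loss skips those positions, as a cross-entropy
# configured with ignore_index=-1 would; the actual loss lives in BertForPreTraining, not here.
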
def convert_example_to_features(example, max_seq_length, tokenizer):
    """
    Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
    IDs, LM labels, input_mask, CLS and SEP tokens etc.
    :param example: InputExample, containing sentence input as strings and is_next label
    :param max_seq_length: int, maximum length of sequence.
    :param tokenizer: Tokenizer
    :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
    """
    tokens_a = example.tokens_a
    tokens_b = example.tokens_b
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)

    tokens_a, t1_label = random_word(tokens_a, tokenizer)
    tokens_b, t2_label = random_word(tokens_b, tokenizer)
    # concatenate lm labels and account for CLS, SEP, SEP
    lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0   0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    assert len(tokens_b) > 0
    for token in tokens_b:
        tokens.append(token)
        segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)

    # Zero-pad up to the sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        lm_label_ids.append(-1)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(lm_label_ids) == max_seq_length

    if example.guid < 5:
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        logger.info("LM label: %s " % (lm_label_ids))
        logger.info("Is next sentence label: %s " % (example.is_next))

    features = InputFeatures(input_ids=input_ids,
                             input_mask=input_mask,
                             segment_ids=segment_ids,
                             lm_label_ids=lm_label_ids,
                             is_next=example.is_next)
    return features

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_corpus",
                        default=None,
                        type=str,
                        required=True,
                        help="The input train corpus.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--on_memory",
                        action='store_true',
                        help="Whether to load train samples into memory or use disk")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:
 | 
			
		||||
        torch.cuda.set_device(args.local_rank)
 | 
			
		||||
        device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        n_gpu = 1
 | 
			
		||||
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
 | 
			
		||||
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
 | 
			
		||||
 | 
			
		||||
    if args.gradient_accumulation_steps < 1:
 | 
			
		||||
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
 | 
			
		||||
                            args.gradient_accumulation_steps))
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
    if not args.do_train:
 | 
			
		||||
        raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")
 | 
			
		||||
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
 | 
			
		||||
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
 | 
			
		||||
    if not os.path.exists(args.output_dir):
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    #train_examples = None
 | 
			
		||||
    num_train_optimization_steps = None
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        print("Loading Train Dataset", args.train_corpus)
 | 
			
		||||
        train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length,
 | 
			
		||||
                                    corpus_lines=None, on_memory=args.on_memory)
 | 
			
		||||
        num_train_optimization_steps = int(
 | 
			
		||||
            len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
 | 
			
		||||
        if args.local_rank != -1:
 | 
			
		||||
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 | 
			
		||||
 | 
			
		||||
    # Prepare model
 | 
			
		||||
    model = BertForPreTraining.from_pretrained(args.bert_model)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        model.half()
 | 
			
		||||
    model.to(device)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.parallel import DistributedDataParallel as DDP
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
        model = DDP(model)
 | 
			
		||||
    elif n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer
 | 
			
		||||
    param_optimizer = list(model.named_parameters())
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.optimizers import FP16_Optimizer
 | 
			
		||||
            from apex.optimizers import FusedAdam
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
 | 
			
		||||
        optimizer = FusedAdam(optimizer_grouped_parameters,
 | 
			
		||||
                              lr=args.learning_rate,
 | 
			
		||||
                              bias_correction=False,
 | 
			
		||||
                              max_grad_norm=1.0)
 | 
			
		||||
        if args.loss_scale == 0:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
 | 
			
		||||
        else:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
 | 
			
		||||
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
 | 
			
		||||
                                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    else:
 | 
			
		||||
        optimizer = BertAdam(optimizer_grouped_parameters,
 | 
			
		||||
                             lr=args.learning_rate,
 | 
			
		||||
                             warmup=args.warmup_proportion,
 | 
			
		||||
                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        logger.info("***** Running training *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(train_dataset))
 | 
			
		||||
        logger.info("  Batch size = %d", args.train_batch_size)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
        if args.local_rank == -1:
 | 
			
		||||
            train_sampler = RandomSampler(train_dataset)
 | 
			
		||||
        else:
 | 
			
		||||
            #TODO: check if this works with current data generator from disk that relies on next(file)
 | 
			
		||||
            # (it doesn't return item back by index)
 | 
			
		||||
            train_sampler = DistributedSampler(train_dataset)
 | 
			
		||||
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
 | 
			
		||||
 | 
			
		||||
        model.train()
 | 
			
		||||
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
 | 
			
		||||
            tr_loss = 0
 | 
			
		||||
            nb_tr_examples, nb_tr_steps = 0, 0
 | 
			
		||||
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
 | 
			
		||||
                batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
 | 
			
		||||
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
 | 
			
		||||
                if n_gpu > 1:
 | 
			
		||||
                    loss = loss.mean() # mean() to average on multi-gpu.
 | 
			
		||||
                if args.gradient_accumulation_steps > 1:
 | 
			
		||||
                    loss = loss / args.gradient_accumulation_steps
 | 
			
		||||
                if args.fp16:
 | 
			
		||||
                    optimizer.backward(loss)
 | 
			
		||||
                else:
 | 
			
		||||
                    loss.backward()
 | 
			
		||||
                tr_loss += loss.item()
 | 
			
		||||
                nb_tr_examples += input_ids.size(0)
 | 
			
		||||
                nb_tr_steps += 1
 | 
			
		||||
                if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                    if args.fp16:
 | 
			
		||||
                        # modify learning rate with the special warm-up BERT uses
                        # if args.fp16 is False, BertAdam is used, which handles this automatically
 | 
			
		||||
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
 | 
			
		||||
                                                                                 args.warmup_proportion)
 | 
			
		||||
                        for param_group in optimizer.param_groups:
 | 
			
		||||
                            param_group['lr'] = lr_this_step
 | 
			
		||||
                    optimizer.step()
 | 
			
		||||
                    optimizer.zero_grad()
 | 
			
		||||
                    global_step += 1
 | 
			
		||||
 | 
			
		||||
        # Save a trained model
        logger.info("***** Saving fine-tuned model *****")
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
 | 
			
		||||
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
 | 
			
		||||
        if args.do_train:
 | 
			
		||||
            torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
    """Truncates a sequence pair in place to the maximum length."""
 | 
			
		||||
 | 
			
		||||
    # This is a simple heuristic which will always truncate the longer sequence
 | 
			
		||||
    # one token at a time. This makes more sense than truncating an equal percent
 | 
			
		||||
    # of tokens from each, since if one sequence is very short then each token
 | 
			
		||||
    # that's truncated likely contains more information than a longer sequence.
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_length:
 | 
			
		||||
            break
 | 
			
		||||
        if len(tokens_a) > len(tokens_b):
 | 
			
		||||
            tokens_a.pop()
 | 
			
		||||
        else:
 | 
			
		||||
            tokens_b.pop()
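A short, hedged illustration of the heuristic (the token lists are made up): with max_length=5, the longer sequence loses tokens one at a time until the pair fits.

# Hypothetical example of the truncation heuristic defined above.
tokens_a = ['the', 'quick', 'brown', 'fox']
tokens_b = ['jumps', 'over']
_truncate_seq_pair(tokens_a, tokens_b, max_length=5)
print(tokens_a, tokens_b)   # ['the', 'quick', 'brown'] ['jumps', 'over']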
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def accuracy(out, labels):
 | 
			
		||||
    outputs = np.argmax(out, axis=1)
 | 
			
		||||
    return np.sum(outputs == labels)
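Note that accuracy() returns the number of correct predictions, not a fraction; callers are expected to divide by the number of examples themselves. A small illustrative example (made-up logits and labels):

# Illustrative only: accuracy() counts correct argmax predictions.
out = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
print(accuracy(out, labels))   # 2 of the 3 predictions match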
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    main()
examples/requirements.txt (new file, 2 additions)
@@ -0,0 +1,2 @@
tensorboardX
scikit-learn

examples/run_bertology.py (new file, 348 additions)
@@ -0,0 +1,348 @@
#!/usr/bin/env python3
 | 
			
		||||
# Copyright 2018 CMU and The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Bertology: this script shows how you can explore the internals of the models in the library to:
 | 
			
		||||
    - compute the entropy of the head attentions
 | 
			
		||||
    - compute the importance of each head
 | 
			
		||||
    - prune (remove) the low-importance heads.
 | 
			
		||||
    Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)
 | 
			
		||||
    which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1
 | 
			
		||||
"""
 | 
			
		||||
import os
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
from datetime import timedelta, datetime
 | 
			
		||||
from tqdm import tqdm
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subset
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
from torch.nn import CrossEntropyLoss, MSELoss
 | 
			
		||||
 | 
			
		||||
from transformers import (WEIGHTS_NAME,
 | 
			
		||||
                                  BertConfig, BertForSequenceClassification, BertTokenizer,
 | 
			
		||||
                                  XLMConfig, XLMForSequenceClassification, XLMTokenizer,
 | 
			
		||||
                                  XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer)
 | 
			
		||||
 | 
			
		||||
from run_glue import set_seed, load_and_cache_examples, ALL_MODELS, MODEL_CLASSES
 | 
			
		||||
 | 
			
		||||
from utils_glue import (compute_metrics, convert_examples_to_features,
 | 
			
		||||
                        output_modes, processors)
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def entropy(p):
 | 
			
		||||
    """ Compute the entropy of a probability distribution """
 | 
			
		||||
    plogp = p * torch.log(p)
 | 
			
		||||
    plogp[p == 0] = 0
 | 
			
		||||
    return -plogp.sum(dim=-1)
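As a quick sanity check (illustrative only, not part of the script), the entropy of a uniform distribution over n outcomes should come out to log(n):

# Illustrative check: entropy of a uniform distribution over 4 outcomes is log(4) ~= 1.386.
p = torch.full((1, 4), 0.25)
print(entropy(p))   # tensor([1.3863])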
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def print_2d_tensor(tensor):
 | 
			
		||||
    """ Print a 2D tensor """
 | 
			
		||||
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
 | 
			
		||||
    for row in range(len(tensor)):
 | 
			
		||||
        if tensor.dtype != torch.long:
 | 
			
		||||
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None):
 | 
			
		||||
    """ This method shows how to compute:
 | 
			
		||||
        - head attention entropy
 | 
			
		||||
        - head importance scores according to http://arxiv.org/abs/1905.10650
 | 
			
		||||
    """
 | 
			
		||||
    # Prepare our tensors
 | 
			
		||||
    n_layers, n_heads = model.bert.config.num_hidden_layers, model.bert.config.num_attention_heads
 | 
			
		||||
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
 | 
			
		||||
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
 | 
			
		||||
 | 
			
		||||
    if head_mask is None:
 | 
			
		||||
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
 | 
			
		||||
    head_mask.requires_grad_(requires_grad=True)
 | 
			
		||||
    preds = None
 | 
			
		||||
    labels = None
 | 
			
		||||
    tot_tokens = 0.0
 | 
			
		||||
 | 
			
		||||
    for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
 | 
			
		||||
        batch = tuple(t.to(args.device) for t in batch)
 | 
			
		||||
        input_ids, input_mask, segment_ids, label_ids = batch
 | 
			
		||||
 | 
			
		||||
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
 | 
			
		||||
        outputs = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids, head_mask=head_mask)
 | 
			
		||||
        loss, logits, all_attentions = outputs[0], outputs[1], outputs[-1]  # Loss and logits are the first, attention the last
 | 
			
		||||
        loss.backward()  # Backpropagate to populate the gradients in the head mask
 | 
			
		||||
 | 
			
		||||
        if compute_entropy:
 | 
			
		||||
            for layer, attn in enumerate(all_attentions):
 | 
			
		||||
                masked_entropy = entropy(attn.detach()) * input_mask.float().unsqueeze(1)
 | 
			
		||||
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()
 | 
			
		||||
 | 
			
		||||
        if compute_importance:
 | 
			
		||||
            head_importance += head_mask.grad.abs().detach()
 | 
			
		||||
 | 
			
		||||
        # Also store our logits/labels if we want to compute metrics afterwards
 | 
			
		||||
        if preds is None:
 | 
			
		||||
            preds = logits.detach().cpu().numpy()
 | 
			
		||||
            labels = label_ids.detach().cpu().numpy()
 | 
			
		||||
        else:
 | 
			
		||||
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
 | 
			
		||||
            labels = np.append(labels, label_ids.detach().cpu().numpy(), axis=0)
 | 
			
		||||
 | 
			
		||||
        tot_tokens += input_mask.float().detach().sum().data
 | 
			
		||||
 | 
			
		||||
    # Normalize
 | 
			
		||||
    attn_entropy /= tot_tokens
 | 
			
		||||
    head_importance /= tot_tokens
 | 
			
		||||
    # Layerwise importance normalization
 | 
			
		||||
    if not args.dont_normalize_importance_by_layer:
 | 
			
		||||
        exponent = 2
 | 
			
		||||
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
 | 
			
		||||
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
 | 
			
		||||
 | 
			
		||||
    if not args.dont_normalize_global_importance:
 | 
			
		||||
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
 | 
			
		||||
 | 
			
		||||
    # Print/save matrices
 | 
			
		||||
    np.save(os.path.join(args.output_dir, 'attn_entropy.npy'), attn_entropy.detach().cpu().numpy())
 | 
			
		||||
    np.save(os.path.join(args.output_dir, 'head_importance.npy'), head_importance.detach().cpu().numpy())
 | 
			
		||||
 | 
			
		||||
    logger.info("Attention entropies")
 | 
			
		||||
    print_2d_tensor(attn_entropy)
 | 
			
		||||
    logger.info("Head importance scores")
 | 
			
		||||
    print_2d_tensor(head_importance)
 | 
			
		||||
    logger.info("Head ranked by importance scores")
 | 
			
		||||
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
 | 
			
		||||
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(head_importance.numel(), device=args.device)
 | 
			
		||||
    head_ranks = head_ranks.view_as(head_importance)
 | 
			
		||||
    print_2d_tensor(head_ranks)
 | 
			
		||||
 | 
			
		||||
    return attn_entropy, head_importance, preds, labels
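The importance score used here is simply the absolute gradient of the loss with respect to each entry of the head mask, accumulated over the evaluation set and then normalized. A toy sketch of the same idea on a scalar mask (purely illustrative, independent of the model above):

# Toy sketch: importance of a masked component = |d loss / d mask|.
mask = torch.ones(1, requires_grad=True)
x = torch.tensor([2.0])
loss = (3.0 * mask * x).sum()   # this component contributes 3 * x to the loss
loss.backward()
print(mask.grad.abs())          # tensor([6.]) -> larger gradient means more important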
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def mask_heads(args, model, eval_dataloader):
 | 
			
		||||
    """ This method shows how to mask head (set some heads to zero), to test the effect on the network,
 | 
			
		||||
        based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
 | 
			
		||||
    """
 | 
			
		||||
    _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
 | 
			
		||||
    preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
 | 
			
		||||
    original_score = compute_metrics(args.task_name, preds, labels)[args.metric_name]
 | 
			
		||||
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
 | 
			
		||||
 | 
			
		||||
    new_head_mask = torch.ones_like(head_importance)
 | 
			
		||||
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
 | 
			
		||||
 | 
			
		||||
    current_score = original_score
 | 
			
		||||
    while current_score >= original_score * args.masking_threshold:
 | 
			
		||||
        head_mask = new_head_mask.clone() # save current head mask
 | 
			
		||||
        # Sort heads from least to most important; already-masked heads are pushed to the end
 | 
			
		||||
        head_importance[head_mask == 0.0] = float('Inf')
 | 
			
		||||
        current_heads_to_mask = head_importance.view(-1).sort()[1]
 | 
			
		||||
 | 
			
		||||
        if len(current_heads_to_mask) <= num_to_mask:
 | 
			
		||||
            break
 | 
			
		||||
 | 
			
		||||
        # mask heads
 | 
			
		||||
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
 | 
			
		||||
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
 | 
			
		||||
        new_head_mask = new_head_mask.view(-1)
 | 
			
		||||
        new_head_mask[current_heads_to_mask] = 0.0
 | 
			
		||||
        new_head_mask = new_head_mask.view_as(head_mask)
 | 
			
		||||
        print_2d_tensor(new_head_mask)
 | 
			
		||||
 | 
			
		||||
        # Compute metric and head importance again
 | 
			
		||||
        _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
 | 
			
		||||
        preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
 | 
			
		||||
        current_score = compute_metrics(args.task_name, preds, labels)[args.metric_name]
 | 
			
		||||
        logger.info("Masking: current score: %f, remaning heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum()/new_head_mask.numel() * 100)
 | 
			
		||||
 | 
			
		||||
    logger.info("Final head mask")
 | 
			
		||||
    print_2d_tensor(head_mask)
 | 
			
		||||
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
 | 
			
		||||
 | 
			
		||||
    return head_mask
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def prune_heads(args, model, eval_dataloader, head_mask):
 | 
			
		||||
    """ This method shows how to prune head (remove heads weights) based on
 | 
			
		||||
        the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
 | 
			
		||||
    """
 | 
			
		||||
    # Try pruning and test time speedup
 | 
			
		||||
    # Pruning is like masking but we actually remove the masked weights
 | 
			
		||||
    before_time = datetime.now()
 | 
			
		||||
    _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader,
 | 
			
		||||
                                                   compute_entropy=False, compute_importance=False, head_mask=head_mask)
 | 
			
		||||
    preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
 | 
			
		||||
    score_masking = compute_metrics(args.task_name, preds, labels)[args.metric_name]
 | 
			
		||||
    original_time = datetime.now() - before_time
 | 
			
		||||
 | 
			
		||||
    original_num_params = sum(p.numel() for p in model.parameters())
 | 
			
		||||
    heads_to_prune = dict((layer, (1 - head_mask[layer].long()).nonzero().tolist()) for layer in range(len(head_mask)))
 | 
			
		||||
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
 | 
			
		||||
    model.prune_heads(heads_to_prune)
 | 
			
		||||
    pruned_num_params = sum(p.numel() for p in model.parameters())
 | 
			
		||||
 | 
			
		||||
    before_time = datetime.now()
 | 
			
		||||
    _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader,
 | 
			
		||||
                                                    compute_entropy=False, compute_importance=False, head_mask=None)
 | 
			
		||||
    preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
 | 
			
		||||
    score_pruning = compute_metrics(args.task_name, preds, labels)[args.metric_name]
 | 
			
		||||
    new_time = datetime.now() - before_time
 | 
			
		||||
 | 
			
		||||
    logger.info("Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params/original_num_params * 100)
 | 
			
		||||
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
 | 
			
		||||
    logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time/new_time * 100)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--data_dir", default=None, type=str, required=True,
 | 
			
		||||
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
 | 
			
		||||
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
 | 
			
		||||
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
 | 
			
		||||
                            ALL_MODELS))
 | 
			
		||||
    parser.add_argument("--task_name", default=None, type=str, required=True,
 | 
			
		||||
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
 | 
			
		||||
    parser.add_argument("--output_dir", default=None, type=str, required=True,
 | 
			
		||||
                        help="The output directory where the model predictions and checkpoints will be written.")
 | 
			
		||||
 | 
			
		||||
    ## Other parameters
 | 
			
		||||
    parser.add_argument("--config_name", default="", type=str,
 | 
			
		||||
                        help="Pretrained config name or path if not the same as model_name_or_path")
 | 
			
		||||
    parser.add_argument("--tokenizer_name", default="", type=str,
 | 
			
		||||
                        help="Pretrained tokenizer name or path if not the same as model_name_or_path")
 | 
			
		||||
    parser.add_argument("--cache_dir", default="", type=str,
 | 
			
		||||
                        help="Where do you want to store the pre-trained models downloaded from s3")
 | 
			
		||||
    parser.add_argument("--data_subset", type=int, default=-1,
 | 
			
		||||
                        help="If > 0: limit the data to a subset of data_subset instances.")
 | 
			
		||||
    parser.add_argument("--overwrite_output_dir", action='store_true',
 | 
			
		||||
                        help="Whether to overwrite data in output directory")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--dont_normalize_importance_by_layer", action='store_true',
 | 
			
		||||
                        help="Don't normalize importance score by layers")
 | 
			
		||||
    parser.add_argument("--dont_normalize_global_importance", action='store_true',
 | 
			
		||||
                        help="Don't normalize all importance scores between 0 and 1")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--try_masking", action='store_true',
 | 
			
		||||
                        help="Whether to try to mask head until a threshold of accuracy.")
 | 
			
		||||
    parser.add_argument("--masking_threshold", default=0.9, type=float,
 | 
			
		||||
                        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
 | 
			
		||||
    parser.add_argument("--masking_amount", default=0.1, type=float,
 | 
			
		||||
                        help="Amount to heads to masking at each masking step.")
 | 
			
		||||
    parser.add_argument("--metric_name", default="acc", type=str,
 | 
			
		||||
                        help="Metric to use for head masking.")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--max_seq_length", default=128, type=int,
 | 
			
		||||
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
 | 
			
		||||
                             "Sequences longer than this will be truncated, sequences shorter padded.")
 | 
			
		||||
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument("--seed", type=int, default=42)
 | 
			
		||||
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
 | 
			
		||||
    parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    if args.server_ip and args.server_port:
 | 
			
		||||
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
 | 
			
		||||
        import ptvsd
 | 
			
		||||
        print("Waiting for debugger attach")
 | 
			
		||||
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
 | 
			
		||||
        ptvsd.wait_for_attach()
 | 
			
		||||
 | 
			
		||||
    # Setup devices and distributed training
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        args.n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:
 | 
			
		||||
        torch.cuda.set_device(args.local_rank)
 | 
			
		||||
        args.device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        args.n_gpu = 1
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend
 | 
			
		||||
 | 
			
		||||
    # Setup logging
 | 
			
		||||
    logging.basicConfig(level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
 | 
			
		||||
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
 | 
			
		||||
 | 
			
		||||
    # Set seeds
 | 
			
		||||
    set_seed(args)
 | 
			
		||||
 | 
			
		||||
    # Prepare GLUE task
 | 
			
		||||
    args.task_name = args.task_name.lower()
 | 
			
		||||
    if args.task_name not in processors:
 | 
			
		||||
        raise ValueError("Task not found: %s" % (args.task_name))
 | 
			
		||||
    processor = processors[args.task_name]()
 | 
			
		||||
    args.output_mode = output_modes[args.task_name]
 | 
			
		||||
    label_list = processor.get_labels()
 | 
			
		||||
    num_labels = len(label_list)
 | 
			
		||||
 | 
			
		||||
    # Load pretrained model and tokenizer
 | 
			
		||||
    if args.local_rank not in [-1, 0]:
 | 
			
		||||
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
 | 
			
		||||
 | 
			
		||||
    args.model_type = ""
 | 
			
		||||
    for key in MODEL_CLASSES:
 | 
			
		||||
        if key in args.model_name_or_path.lower():
 | 
			
		||||
            args.model_type = key  # take the first match in model types
 | 
			
		||||
            break
 | 
			
		||||
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
 | 
			
		||||
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
 | 
			
		||||
                                          num_labels=num_labels, finetuning_task=args.task_name,
 | 
			
		||||
                                          output_attentions=True)
 | 
			
		||||
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path)
 | 
			
		||||
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == 0:
 | 
			
		||||
        torch.distributed.barrier()  # End of barrier: the other processes can now load the downloaded model & vocab
 | 
			
		||||
 | 
			
		||||
    # Distributed and parallel training
 | 
			
		||||
    model.to(args.device)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
 | 
			
		||||
                                                          output_device=args.local_rank,
 | 
			
		||||
                                                          find_unused_parameters=True)
 | 
			
		||||
    elif args.n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Print/save training arguments
 | 
			
		||||
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
 | 
			
		||||
    logger.info("Training/evaluation parameters %s", args)
 | 
			
		||||
 | 
			
		||||
    # Prepare dataset for the GLUE task
 | 
			
		||||
    eval_data = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=True)
 | 
			
		||||
    if args.data_subset > 0:
 | 
			
		||||
        eval_data = Subset(eval_data, list(range(min(args.data_subset, len(eval_data)))))
 | 
			
		||||
    eval_sampler = SequentialSampler(eval_data) if args.local_rank == -1 else DistributedSampler(eval_data)
 | 
			
		||||
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    # Compute head entropy and importance score
 | 
			
		||||
    compute_heads_importance(args, model, eval_dataloader)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    # Try head masking (set heads to zero until the score goes under a threshold)
 | 
			
		||||
    # and head pruning (remove masked heads and see the effect on the network)
 | 
			
		||||
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
 | 
			
		||||
        head_mask = mask_heads(args, model, eval_dataloader)
 | 
			
		||||
        prune_heads(args, model, eval_dataloader, head_mask)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    main()
examples/run_generation.py (new file, 195 additions)
@@ -0,0 +1,195 @@
#!/usr/bin/env python3
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import absolute_import, division, print_function, unicode_literals
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
from tqdm import trange
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn.functional as F
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig
 | 
			
		||||
 | 
			
		||||
from transformers import GPT2LMHeadModel, GPT2Tokenizer
 | 
			
		||||
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
 | 
			
		||||
from transformers import XLNetLMHeadModel, XLNetTokenizer
 | 
			
		||||
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level = logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
 | 
			
		||||
 | 
			
		||||
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
 | 
			
		||||
 | 
			
		||||
MODEL_CLASSES = {
 | 
			
		||||
    'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
 | 
			
		||||
    'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
 | 
			
		||||
    'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
 | 
			
		||||
    'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
 | 
			
		||||
# in https://github.com/rusiaaman/XLNet-gen#methodology
 | 
			
		||||
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
 | 
			
		||||
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
 | 
			
		||||
(except for Alexei and Maria) are discovered.
 | 
			
		||||
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
 | 
			
		||||
remainder of the story. 1883 Western Siberia,
 | 
			
		||||
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
 | 
			
		||||
Rasputin has a vision and denounces one of the men as a horse thief. Although his
 | 
			
		||||
father initially slaps him for making such an accusation, Rasputin watches as the
 | 
			
		||||
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
 | 
			
		||||
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
 | 
			
		||||
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def set_seed(args):
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if args.n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
 | 
			
		||||
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
 | 
			
		||||
        Args:
 | 
			
		||||
            logits: logits distribution shape (vocabulary size)
 | 
			
		||||
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
 | 
			
		||||
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
 | 
			
		||||
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
 | 
			
		||||
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
 | 
			
		||||
    """
 | 
			
		||||
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
 | 
			
		||||
    top_k = min(top_k, logits.size(-1))  # Safety check
 | 
			
		||||
    if top_k > 0:
 | 
			
		||||
        # Remove all tokens with a probability less than the last token of the top-k
 | 
			
		||||
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
 | 
			
		||||
        logits[indices_to_remove] = filter_value
 | 
			
		||||
 | 
			
		||||
    if top_p > 0.0:
 | 
			
		||||
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
 | 
			
		||||
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
 | 
			
		||||
 | 
			
		||||
        # Remove tokens with cumulative probability above the threshold
 | 
			
		||||
        sorted_indices_to_remove = cumulative_probs > top_p
 | 
			
		||||
        # Shift the indices to the right to keep also the first token above the threshold
 | 
			
		||||
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
 | 
			
		||||
        sorted_indices_to_remove[..., 0] = 0
 | 
			
		||||
 | 
			
		||||
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
 | 
			
		||||
        logits[indices_to_remove] = filter_value
 | 
			
		||||
    return logits
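A brief, hedged usage example with made-up logits, showing that everything outside the kept set is pushed to -inf so it receives zero probability after the softmax:

# Hypothetical usage of top_k_top_p_filtering on made-up logits.
logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
filtered = top_k_top_p_filtering(logits.clone(), top_k=2)
print(filtered)                      # tensor([2., 1., -inf, -inf])
print(F.softmax(filtered, dim=-1))   # probability mass only on the two kept tokens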
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
 | 
			
		||||
    context = torch.tensor(context, dtype=torch.long, device=device)
 | 
			
		||||
    context = context.unsqueeze(0).repeat(num_samples, 1)
 | 
			
		||||
    generated = context
 | 
			
		||||
    with torch.no_grad():
 | 
			
		||||
        for _ in trange(length):
 | 
			
		||||
 | 
			
		||||
            inputs = {'input_ids': generated}
 | 
			
		||||
            if is_xlnet: 
 | 
			
		||||
                # By default XLNet predicts the token at the current position (not the next token) and is bi-directional
 | 
			
		||||
                # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
 | 
			
		||||
                input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
 | 
			
		||||
                perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
 | 
			
		||||
                perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
 | 
			
		||||
                target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
 | 
			
		||||
                target_mapping[0, 0, -1] = 1.0  # predict last token
 | 
			
		||||
                inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
 | 
			
		||||
 | 
			
		||||
            outputs = model(**inputs)  # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
 | 
			
		||||
            next_token_logits = outputs[0][0, -1, :] / temperature
 | 
			
		||||
            filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
 | 
			
		||||
            next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
 | 
			
		||||
            generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
 | 
			
		||||
    return generated
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    parser.add_argument("--model_type", default=None, type=str, required=True,
 | 
			
		||||
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
 | 
			
		||||
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
 | 
			
		||||
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
 | 
			
		||||
    parser.add_argument("--prompt", type=str, default="")
 | 
			
		||||
    parser.add_argument("--padding_text", type=str, default="")
 | 
			
		||||
    parser.add_argument("--length", type=int, default=20)
 | 
			
		||||
    parser.add_argument("--temperature", type=float, default=1.0)
 | 
			
		||||
    parser.add_argument("--top_k", type=int, default=0)
 | 
			
		||||
    parser.add_argument("--top_p", type=float, default=0.9)
 | 
			
		||||
    parser.add_argument("--no_cuda", action='store_true',
 | 
			
		||||
                        help="Avoid using CUDA when available")
 | 
			
		||||
    parser.add_argument('--seed', type=int, default=42,
 | 
			
		||||
                        help="random seed for initialization")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
    args.n_gpu = torch.cuda.device_count()
 | 
			
		||||
 | 
			
		||||
    set_seed(args)
 | 
			
		||||
 | 
			
		||||
    args.model_type = args.model_type.lower()
 | 
			
		||||
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
 | 
			
		||||
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
 | 
			
		||||
    model = model_class.from_pretrained(args.model_name_or_path)
 | 
			
		||||
    model.to(args.device)
 | 
			
		||||
    model.eval()
 | 
			
		||||
 | 
			
		||||
    if args.length < 0 and model.config.max_position_embeddings > 0:
 | 
			
		||||
        args.length = model.config.max_position_embeddings
 | 
			
		||||
    elif 0 < model.config.max_position_embeddings < args.length:
 | 
			
		||||
        args.length = model.config.max_position_embeddings  # No generation longer than the model's maximum length
 | 
			
		||||
    elif args.length < 0:
 | 
			
		||||
        args.length = MAX_LENGTH  # avoid infinite loop
 | 
			
		||||
 | 
			
		||||
    print(args)
 | 
			
		||||
    while True:
 | 
			
		||||
        raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
 | 
			
		||||
        if args.model_type in ["transfo-xl", "xlnet"]:
 | 
			
		||||
            # Models with memory (Transformer-XL, XLNet) benefit from a long prompt when the input is short.
 | 
			
		||||
            raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text
 | 
			
		||||
        context_tokens = tokenizer.encode(raw_text)
 | 
			
		||||
        out = sample_sequence(
 | 
			
		||||
            model=model,
 | 
			
		||||
            context=context_tokens,
 | 
			
		||||
            length=args.length,
 | 
			
		||||
            temperature=args.temperature,
 | 
			
		||||
            top_k=args.top_k,
 | 
			
		||||
            top_p=args.top_p,
 | 
			
		||||
            device=args.device,
 | 
			
		||||
            is_xlnet=bool(args.model_type == "xlnet"),
 | 
			
		||||
        )
 | 
			
		||||
        out = out[0, len(context_tokens):].tolist()
 | 
			
		||||
        text = tokenizer.decode(out, clean_up_tokenization_spaces=True)
 | 
			
		||||
        print(text)
 | 
			
		||||
        if args.prompt:
 | 
			
		||||
            break
 | 
			
		||||
    return text
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    main()
examples/run_glue.py (new file, 500 additions)
@@ -0,0 +1,500 @@
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import glob
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import random
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
 | 
			
		||||
                              TensorDataset)
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
from tensorboardX import SummaryWriter
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
 | 
			
		||||
from transformers import (WEIGHTS_NAME, BertConfig,
 | 
			
		||||
                                  BertForSequenceClassification, BertTokenizer,
 | 
			
		||||
                                  RobertaConfig,
 | 
			
		||||
                                  RobertaForSequenceClassification,
 | 
			
		||||
                                  RobertaTokenizer,
 | 
			
		||||
                                  XLMConfig, XLMForSequenceClassification,
 | 
			
		||||
                                  XLMTokenizer, XLNetConfig,
 | 
			
		||||
                                  XLNetForSequenceClassification,
 | 
			
		||||
                                  XLNetTokenizer,
 | 
			
		||||
                                  DistilBertConfig,
 | 
			
		||||
                                  DistilBertForSequenceClassification,
 | 
			
		||||
                                  DistilBertTokenizer)
 | 
			
		||||
 | 
			
		||||
from transformers import AdamW, WarmupLinearSchedule
 | 
			
		||||
 | 
			
		||||
from transformers import glue_compute_metrics as compute_metrics
 | 
			
		||||
from transformers import glue_output_modes as output_modes
 | 
			
		||||
from transformers import glue_processors as processors
 | 
			
		||||
from transformers import glue_convert_examples_to_features as convert_examples_to_features
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig)), ())
 | 
			
		||||
 | 
			
		||||
MODEL_CLASSES = {
 | 
			
		||||
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
 | 
			
		||||
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
 | 
			
		||||
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
 | 
			
		||||
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
 | 
			
		||||
    'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def set_seed(args):
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if args.n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def train(args, train_dataset, model, tokenizer):
 | 
			
		||||
    """ Train the model """
 | 
			
		||||
    if args.local_rank in [-1, 0]:
 | 
			
		||||
        tb_writer = SummaryWriter()
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
 | 
			
		||||
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
 | 
			
		||||
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
 | 
			
		||||
 | 
			
		||||
    if args.max_steps > 0:
 | 
			
		||||
        t_total = args.max_steps
 | 
			
		||||
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
 | 
			
		||||
    else:
 | 
			
		||||
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer and schedule (linear warmup and decay)
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
 | 
			
		||||
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
 | 
			
		||||
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex import amp
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
 | 
			
		||||
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
 | 
			
		||||
 | 
			
		||||
    # multi-gpu training (should be after apex fp16 initialization)
 | 
			
		||||
    if args.n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Distributed training (should be after apex fp16 initialization)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
 | 
			
		||||
                                                          output_device=args.local_rank,
 | 
			
		||||
                                                          find_unused_parameters=True)
 | 
			
		||||
 | 
			
		||||
    # Train!
 | 
			
		||||
    logger.info("***** Running training *****")
 | 
			
		||||
    logger.info("  Num examples = %d", len(train_dataset))
 | 
			
		||||
    logger.info("  Num Epochs = %d", args.num_train_epochs)
 | 
			
		||||
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
 | 
			
		||||
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
 | 
			
		||||
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
 | 
			
		||||
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
 | 
			
		||||
    logger.info("  Total optimization steps = %d", t_total)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    tr_loss, logging_loss = 0.0, 0.0
 | 
			
		||||
    model.zero_grad()
 | 
			
		||||
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
 | 
			
		||||
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
 | 
			
		||||
    for _ in train_iterator:
 | 
			
		||||
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
 | 
			
		||||
        for step, batch in enumerate(epoch_iterator):
 | 
			
		||||
            model.train()
 | 
			
		||||
            batch = tuple(t.to(args.device) for t in batch)
 | 
			
		||||
            inputs = {'input_ids':      batch[0],
 | 
			
		||||
                      'attention_mask': batch[1],
 | 
			
		||||
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM, DistilBERT and RoBERTa don't use segment_ids
 | 
			
		||||
                      'labels':         batch[3]}
 | 
			
		||||
            outputs = model(**inputs)
 | 
			
		||||
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
 | 
			
		||||
 | 
			
		||||
            if args.n_gpu > 1:
 | 
			
		||||
                loss = loss.mean() # mean() to average on multi-gpu parallel training
 | 
			
		||||
            if args.gradient_accumulation_steps > 1:
 | 
			
		||||
                loss = loss / args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
            if args.fp16:
 | 
			
		||||
                with amp.scale_loss(loss, optimizer) as scaled_loss:
 | 
			
		||||
                    scaled_loss.backward()
 | 
			
		||||
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
 | 
			
		||||
            else:
 | 
			
		||||
                loss.backward()
 | 
			
		||||
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
 | 
			
		||||
 | 
			
		||||
            tr_loss += loss.item()
 | 
			
		||||
            if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                optimizer.step()
 | 
			
		||||
                scheduler.step()  # Update learning rate schedule
 | 
			
		||||
                model.zero_grad()
 | 
			
		||||
                global_step += 1
 | 
			
		||||
 | 
			
		||||
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
 | 
			
		||||
                    # Log metrics
 | 
			
		||||
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
 | 
			
		||||
                        results = evaluate(args, model, tokenizer)
 | 
			
		||||
                        for key, value in results.items():
 | 
			
		||||
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
 | 
			
		||||
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
 | 
			
		||||
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
 | 
			
		||||
                    logging_loss = tr_loss
 | 
			
		||||
 | 
			
		||||
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
 | 
			
		||||
                    # Save model checkpoint
 | 
			
		||||
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
 | 
			
		||||
                    if not os.path.exists(output_dir):
 | 
			
		||||
                        os.makedirs(output_dir)
 | 
			
		||||
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
 | 
			
		||||
                    model_to_save.save_pretrained(output_dir)
 | 
			
		||||
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
 | 
			
		||||
                    logger.info("Saving model checkpoint to %s", output_dir)
 | 
			
		||||
 | 
			
		||||
            if args.max_steps > 0 and global_step > args.max_steps:
 | 
			
		||||
                epoch_iterator.close()
 | 
			
		||||
                break
 | 
			
		||||
        if args.max_steps > 0 and global_step > args.max_steps:
 | 
			
		||||
            train_iterator.close()
 | 
			
		||||
            break
 | 
			
		||||
 | 
			
		||||
    if args.local_rank in [-1, 0]:
 | 
			
		||||
        tb_writer.close()
 | 
			
		||||
 | 
			
		||||
    return global_step, tr_loss / global_step
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def evaluate(args, model, tokenizer, prefix=""):
 | 
			
		||||
    # Loop to handle MNLI double evaluation (matched, mis-matched)
 | 
			
		||||
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
 | 
			
		||||
    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs = {'input_ids':      batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM, DistilBERT and RoBERTa don't use segment_ids
                          'labels':         batch[3]}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]

                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)

        output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    return results
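# Illustration (hypothetical values): for MNLI the loop above runs twice, once per dev set, e.g.
#   eval_task_names   == ("mnli", "mnli-mm")
#   eval_outputs_dirs == ("/tmp/out", "/tmp/out-MM")
# so an "eval_results.txt" is written for both the matched and mismatched sets;
# for every other GLUE task the loop runs exactly once.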


def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache

    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        'dev' if evaluate else 'train',
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
        features = convert_examples_to_features(examples,
                                                tokenizer,
                                                label_list=label_list,
                                                max_length=args.max_seq_length,
                                                output_mode=output_mode,
                                                pad_on_left=bool(args.model_type in ['xlnet']),                 # pad on the left for xlnet
                                                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                                pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
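# Note on the cache above: features are serialized once per (split, model, sequence length, task).
# For example, a hypothetical run with bert-base-uncased, --max_seq_length 128 and --task_name MRPC
# would write a file named "cached_train_bert-base-uncased_128_mrpc" inside --data_dir, and later
# runs (including the non-zero ranks in distributed training) reload it instead of re-tokenizing.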


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X update steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X update steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()
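    # Example invocation (illustrative; $GLUE_DIR is a hypothetical path to the downloaded GLUE data):
    #   python run_glue.py --model_type bert --model_name_or_path bert-base-uncased \
    #       --task_name MRPC --do_train --do_eval --do_lower_case \
    #       --data_dir $GLUE_DIR/MRPC --max_seq_length 128 \
    #       --per_gpu_train_batch_size 8 --learning_rate 2e-5 --num_train_epochs 3.0 \
    #       --output_dir /tmp/mrpc_output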

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)


    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)


    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)


    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    return results


if __name__ == "__main__":
    main()

@ -1,131 +0,0 @@
#!/usr/bin/env python3

import argparse
import logging
from tqdm import trange

import torch
import torch.nn.functional as F
import numpy as np

from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

def top_k_logits(logits, k):
    """
    Masks everything but the k top entries as -infinity (-1e10).
    Used to mask logits such that e^-infinity -> 0 won't contribute to the
    sum of the denominator.
    """
    if k == 0:
        return logits
    else:
        values = torch.topk(logits, k)[0]
        batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
        return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)
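# Worked example (hypothetical numbers): with k=2 and logits [1.0, 3.0, 2.0, 0.5] for one
# sequence, the two largest values are 3.0 and 2.0, so batch_mins is 2.0 and the function
# returns [-1e10, 3.0, 2.0, -1e10]; after softmax only the top-2 tokens keep non-zero probability.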

def sample_sequence(model, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, device='cuda', sample=True):
    if start_token is None:
        assert context is not None, 'Specify exactly one of start_token and context!'
        context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)
    else:
        assert context is None, 'Specify exactly one of start_token and context!'
        context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long)
    prev = context
    output = context
    past = None
    with torch.no_grad():
        for i in trange(length):
            logits, past = model(prev, past=past)
            logits = logits[:, -1, :] / temperature
            logits = top_k_logits(logits, k=top_k)
            log_probs = F.softmax(logits, dim=-1)
            if sample:
                prev = torch.multinomial(log_probs, num_samples=1)
            else:
                _, prev = torch.topk(log_probs, k=1, dim=-1)
            output = torch.cat((output, prev), dim=1)
    return output
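# Note on the loop above: `past` carries the model's cached key/value states, so each step only
# feeds the single newly generated token (`prev`) instead of re-encoding the whole sequence.
# With sample=True the next token is drawn from the temperature- and top-k-filtered distribution
# via torch.multinomial; with sample=False generation is greedy (top-1). Despite its name,
# `log_probs` holds ordinary probabilities, since F.softmax (not log_softmax) is applied.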

def run_model():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--nsamples", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=-1)
    parser.add_argument("--length", type=int, default=-1)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')
    args = parser.parse_args()
    print(args)

    if args.batch_size == -1:
        args.batch_size = 1
    assert args.nsamples % args.batch_size == 0

    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    model.to(device)
    model.eval()

    if args.length == -1:
        args.length = model.config.n_ctx // 2
    elif args.length > model.config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx)

    while True:
        context_tokens = []
        if not args.unconditional:
            raw_text = input("Model prompt >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("Model prompt >>> ")
            context_tokens = enc.encode(raw_text)
            generated = 0
            for _ in range(args.nsamples // args.batch_size):
                out = sample_sequence(
                    model=model, length=args.length,
                    context=context_tokens,
                    start_token=None,
                    batch_size=args.batch_size,
                    temperature=args.temperature, top_k=args.top_k, device=device
                )
                out = out[:, len(context_tokens):].tolist()
                for i in range(args.batch_size):
                    generated += 1
                    text = enc.decode(out[i])
                    print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                    print(text)
            print("=" * 80)
        else:
            generated = 0
            for _ in range(args.nsamples // args.batch_size):
                out = sample_sequence(
                    model=model, length=args.length,
                    context=None,
                    start_token=enc.encoder['<|endoftext|>'],
                    batch_size=args.batch_size,
                    temperature=args.temperature, top_k=args.top_k, device=device
                )
                out = out[:,1:].tolist()
                for i in range(args.batch_size):
                    generated += 1
                    text = enc.decode(out[i])
                    print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                    print(text)
            print("=" * 80)

if __name__ == '__main__':
    run_model()
examples/run_lm_finetuning.py (new file, 498 lines)
@ -0,0 +1,498 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import pickle
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange

from transformers import (WEIGHTS_NAME, AdamW, WarmupLinearSchedule,
                          BertConfig, BertForMaskedLM, BertTokenizer,
                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
                          RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
                          DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)


logger = logging.getLogger(__name__)


MODEL_CLASSES = {
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
}
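# How the two objectives map onto these classes (summary of the logic further down in this file):
# the causal heads (GPT2LMHeadModel, OpenAIGPTLMHeadModel) are trained with labels equal to the
# inputs, i.e. plain next-token prediction, while the masked heads (BertForMaskedLM,
# RobertaForMaskedLM, DistilBertForMaskedLM) require the --mlm flag and receive inputs corrupted
# by mask_tokens() together with masked_lm_labels.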


class TextDataset(Dataset):
    def __init__(self, tokenizer, file_path='train', block_size=512):
        assert os.path.isfile(file_path)
        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(directory, f'cached_lm_{block_size}_{filename}')

        if os.path.exists(cached_features_file):
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, 'rb') as handle:
                self.examples = pickle.load(handle)
        else:
            logger.info("Creating features from dataset file at %s", directory)

            self.examples = []
            with open(file_path, encoding="utf-8") as f:
                text = f.read()

            tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

            while len(tokenized_text) >= block_size:  # Truncate in blocks of block_size
                self.examples.append(tokenizer.add_special_tokens_single_sequence(tokenized_text[:block_size]))
                tokenized_text = tokenized_text[block_size:]
            # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
            # If your dataset is small, first you should look for a bigger one :-) and second you
            # can change this behavior by adding (model specific) padding.

            logger.info("Saving features into cached file %s", cached_features_file)
            with open(cached_features_file, 'wb') as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        return torch.tensor(self.examples[item])


def load_and_cache_examples(args, tokenizer, evaluate=False):
    dataset = TextDataset(tokenizer, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
    return dataset
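# Sizing example (hypothetical numbers): a training file that tokenizes to 1,050,000 tokens with
# block_size=512 yields 1,050,000 // 512 = 2,050 blocks of 512 token ids (each then wrapped with
# the model's special tokens by add_special_tokens_single_sequence); the final partial block of
# 400 tokens is dropped, as the comment in TextDataset notes.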


def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def mask_tokens(inputs, tokenizer, args):
    """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
    masked_indices = torch.bernoulli(torch.full(labels.shape, args.mlm_probability)).bool()
    labels[~masked_indices] = -1  # We only compute loss on masked tokens

    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # 10% of the time, we replace masked input tokens with random word
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]

    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels
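# Worked example (hypothetical batch): with mlm_probability=0.15 roughly 15% of positions are
# selected as prediction targets; labels keep the original ids there and are -1 everywhere else,
# so the loss ignores unselected positions. Of the selected positions, about 80% become
# tokenizer.mask_token, about 10% (0.5 of the remaining 20%) become a random vocabulary id, and
# about 10% keep their original token, matching the 80/10/10 scheme in the docstring.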


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
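# Effective batch size illustration (hypothetical setup): with --per_gpu_train_batch_size 4,
# 2 GPUs under DataParallel and --gradient_accumulation_steps 8, each optimizer.step()
# corresponds to 4 * 2 * 8 = 64 examples, which is exactly the "Total train batch size"
# logged above (times the world size when running distributed).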


def evaluate(args, model, tokenizer, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir

    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()

    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = batch.to(args.device)

        with torch.no_grad():
            outputs = model(batch, masked_lm_labels=batch) if args.mlm else model(batch, labels=batch)
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1

    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))

    result = {
        "perplexity": perplexity
    }

    output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))

    return result
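# Reading the metric above: perplexity is exp(mean per-token cross-entropy loss), so an
# eval_loss of 3.0 corresponds to a perplexity of about e^3 ≈ 20.1, and lower values mean
# the language model assigns higher probability to the held-out text.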


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_data_file", default=None, type=str, required=True,
                        help="The input training data file (a text file).")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--eval_data_file", default=None, type=str,
                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).")

    parser.add_argument("--model_type", default="bert", type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str,
                        help="The model checkpoint for weights initialization.")

    parser.add_argument("--mlm", action='store_true',
                        help="Train with masked-language modeling loss instead of language modeling.")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")

    parser.add_argument("--config_name", default="", type=str,
                        help="Optional pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
    parser.add_argument("--block_size", default=-1, type=int,
                        help="Optional input sequence length after tokenization. "
                             "The training dataset will be truncated in blocks of this size for training. "
                             "Defaults to the model max input length for single sentence inputs (taking into account special tokens).")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=1.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X update steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X update steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()
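    # Example invocations (illustrative; the file paths are hypothetical):
    #   Causal LM fine-tuning of GPT-2:
    #     python run_lm_finetuning.py --model_type gpt2 --model_name_or_path gpt2 \
    #         --do_train --train_data_file /data/wiki.train.txt \
    #         --do_eval --eval_data_file /data/wiki.valid.txt --output_dir /tmp/gpt2-ft
    #   Masked LM fine-tuning of RoBERTa (note the required --mlm flag):
    #     python run_lm_finetuning.py --model_type roberta --model_name_or_path roberta-base --mlm \
    #         --do_train --train_data_file /data/wiki.train.txt --output_dir /tmp/roberta-ft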

    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
        raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
                         "flag (masked language modeling).")
    if args.eval_data_file is None and args.do_eval:
        raise ValueError("Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
                         "or remove the --do_eval argument.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training downloads model & vocab

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    if args.block_size <= 0:
        args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
    args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
    model.to(args.device)

    if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training downloads model & vocab

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        if args.local_rank not in [-1, 0]:
            torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training processes the dataset, and the others will use the cache

        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)

        if args.local_rank == 0:
            torch.distributed.barrier()

        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)


    # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)


    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    return results


if __name__ == "__main__":
    main()

examples/run_multiple_choice.py (new file, 542 lines added)
@@ -0,0 +1,542 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for multiple choice (Bert, Roberta, XLNet)."""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import random


import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange

from transformers import (WEIGHTS_NAME, BertConfig,
                                  BertForMultipleChoice, BertTokenizer,
                                  XLNetConfig, XLNetForMultipleChoice,
                                  XLNetTokenizer, RobertaConfig,
                                  RobertaForMultipleChoice, RobertaTokenizer)

from transformers import AdamW, WarmupLinearSchedule

from utils_multiple_choice import (convert_examples_to_features, processors)

logger = logging.getLogger(__name__)

ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, RobertaConfig)), ())

MODEL_CLASSES = {
    'bert': (BertConfig, BertForMultipleChoice, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForMultipleChoice, XLNetTokenizer),
    'roberta': (RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer)
}


def select_field(features, field):
    return [
        [
            choice[field]
            for choice in feature.choices_features
        ]
        for feature in features
    ]
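
# Example (illustrative, with hypothetical feature objects f1 and f2): given
# features whose `choices_features` hold one dict per answer choice,
# select_field(...) gathers a single field across all choices, yielding a
# num_examples x num_choices nested list that torch.tensor() can consume:
#   select_field([f1, f2], 'input_ids')
#   -> [[ids_choice_0, ..., ids_choice_3], [ids_choice_0, ..., ids_choice_3]]
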
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


def set_seed(args):
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                   args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    best_dev_acc, best_dev_loss = 0.0, 99999999999.0
    best_steps = 0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {'input_ids':      batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
                      'labels':         batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                        if results["eval_acc"] > best_dev_acc:
                            best_dev_acc = results["eval_acc"]
                            best_dev_loss = results["eval_loss"]
                            best_steps = global_step
                            if args.do_test:
                                results_test = evaluate(args, model, tokenizer, test=True)
                                for key, value in results_test.items():
                                    tb_writer.add_scalar('test_{}'.format(key), value, global_step)
                                logger.info("test acc: %s, loss: %s, global steps: %s", str(results_test['eval_acc']), str(results_test['eval_loss']), str(global_step))
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
                    logger.info("Average loss: %s at global step: %s", str((tr_loss - logging_loss)/args.logging_steps), str(global_step))
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_vocabulary(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info("Saving model checkpoint to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step, best_steps


def evaluate(args, model, tokenizer, prefix="", test=False):
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=not test, test=test)

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs = {'input_ids':      batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
                          'labels':         batch[3]}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]

                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        preds = np.argmax(preds, axis=1)
        acc = simple_accuracy(preds, out_label_ids)
        result = {"eval_acc": acc, "eval_loss": eval_loss}
        results.update(result)

        output_eval_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(str(prefix) + " is test:" + str(test)))
            writer.write("model           =%s\n" % str(args.model_name_or_path))
            writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps *
                         (torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
            writer.write("train num epochs=%d\n" % args.num_train_epochs)
            writer.write("fp16            =%s\n" % args.fp16)
            writer.write("max seq length  =%d\n" % args.max_seq_length)
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results


def load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False):
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = processors[task]()
    # Load data features from cache or dataset file
    if evaluate:
        cached_mode = 'dev'
    elif test:
        cached_mode = 'test'
    else:
        cached_mode = 'train'
    assert not (evaluate and test)
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
        cached_mode,
        list(filter(None, args.model_name_or_path.split('/'))).pop(),
        str(args.max_seq_length),
        str(task)))
    if os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if evaluate:
            examples = processor.get_dev_examples(args.data_dir)
        elif test:
            examples = processor.get_test_examples(args.data_dir)
        else:
            examples = processor.get_train_examples(args.data_dir)
        logger.info("Training number: %s", str(len(examples)))
        features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer,
            cls_token_at_end=bool(args.model_type in ['xlnet']),            # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            sep_token_extra=bool(args.model_type in ['roberta']),
            cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
            pad_on_left=bool(args.model_type in ['xlnet']),                 # pad on the left for xlnet
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long)
    all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long)
    all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long)
    all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long)

    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true', help='Whether to run test on the test set')
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)
    best_steps = 0

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
        global_step, tr_loss, best_steps = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)


    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)


    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        if not args.do_train:
            args.output_dir = args.model_name_or_path
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)

    if args.do_test and args.local_rank in [-1, 0]:
        if not args.do_train:
            args.output_dir = args.model_name_or_path
        checkpoints = [args.output_dir]
        # if args.eval_all_checkpoints: # can not use this to do test!!
        #     checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
        #     logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step, test=True)
            result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
            results.update(result)
    if best_steps:
        logger.info("best steps of eval acc is the following checkpoints: %s", best_steps)
    return results


if __name__ == "__main__":
    main()
@@ -1,554 +0,0 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import

import argparse
import csv
import logging
import os
import random
import sys
from io import open

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForMultipleChoice, BertConfig
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from pytorch_pretrained_bert.tokenization import BertTokenizer

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)


class SwagExample(object):
    """A single training/test example for the SWAG dataset."""
    def __init__(self,
                 swag_id,
                 context_sentence,
                 start_ending,
                 ending_0,
                 ending_1,
                 ending_2,
                 ending_3,
                 label = None):
        self.swag_id = swag_id
        self.context_sentence = context_sentence
        self.start_ending = start_ending
        self.endings = [
            ending_0,
            ending_1,
            ending_2,
            ending_3,
        ]
        self.label = label

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        l = [
            "swag_id: {}".format(self.swag_id),
            "context_sentence: {}".format(self.context_sentence),
            "start_ending: {}".format(self.start_ending),
            "ending_0: {}".format(self.endings[0]),
            "ending_1: {}".format(self.endings[1]),
            "ending_2: {}".format(self.endings[2]),
            "ending_3: {}".format(self.endings[3]),
        ]

        if self.label is not None:
            l.append("label: {}".format(self.label))

        return ", ".join(l)


class InputFeatures(object):
    def __init__(self,
                 example_id,
                 choices_features,
                 label

    ):
        self.example_id = example_id
        self.choices_features = [
            {
                'input_ids': input_ids,
                'input_mask': input_mask,
                'segment_ids': segment_ids
            }
            for _, input_ids, input_mask, segment_ids in choices_features
        ]
        self.label = label


def read_swag_examples(input_file, is_training):
    with open(input_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        lines = []
        for line in reader:
            if sys.version_info[0] == 2:
                line = list(unicode(cell, 'utf-8') for cell in line)
            lines.append(line)

    if is_training and lines[0][-1] != 'label':
        raise ValueError(
            "For training, the input file must contain a label column."
        )

    examples = [
        SwagExample(
            swag_id = line[2],
            context_sentence = line[4],
            start_ending = line[5], # in the swag dataset, the
                                         # common beginning of each
                                         # choice is stored in "sent2".
            ending_0 = line[7],
            ending_1 = line[8],
            ending_2 = line[9],
            ending_3 = line[10],
            label = int(line[11]) if is_training else None
        ) for line in lines[1:] # we skip the line with the column names
    ]

    return examples

def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 is_training):
    """Loads a data file into a list of `InputBatch`s."""

    # Swag is a multiple choice task. To perform this task using Bert,
    # we will use the formatting proposed in "Improving Language
    # Understanding by Generative Pre-Training" and suggested by
    # @jacobdevlin-google in this issue
    # https://github.com/google-research/bert/issues/38.
    #
    # Each choice will correspond to a sample on which we run the
    # inference. For a given Swag example, we will create the 4
    # following inputs:
    # - [CLS] context [SEP] choice_1 [SEP]
    # - [CLS] context [SEP] choice_2 [SEP]
    # - [CLS] context [SEP] choice_3 [SEP]
    # - [CLS] context [SEP] choice_4 [SEP]
    # The model will output a single value for each input. To get the
    # final decision of the model, we will run a softmax over these 4
    # outputs.
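    #
    # Illustrative example (hypothetical sentences, not taken from the dataset):
    # for a context "A guitarist walks on stage" with start_ending "He" and
    # endings ["bows.", "sings.", "leaves.", "waves."], the four sequences are
    #   [CLS] A guitarist walks on stage [SEP] He bows. [SEP]
    #   [CLS] A guitarist walks on stage [SEP] He sings. [SEP]
    #   [CLS] A guitarist walks on stage [SEP] He leaves. [SEP]
    #   [CLS] A guitarist walks on stage [SEP] He waves. [SEP]
    # and the predicted ending is the argmax over the four per-sequence scores.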
    features = []
 | 
			
		||||
    for example_index, example in enumerate(examples):
 | 
			
		||||
        context_tokens = tokenizer.tokenize(example.context_sentence)
 | 
			
		||||
        start_ending_tokens = tokenizer.tokenize(example.start_ending)
 | 
			
		||||
 | 
			
		||||
        choices_features = []
 | 
			
		||||
        for ending_index, ending in enumerate(example.endings):
 | 
			
		||||
            # We create a copy of the context tokens in order to be
 | 
			
		||||
            # able to shrink it according to ending_tokens
 | 
			
		||||
            context_tokens_choice = context_tokens[:]
 | 
			
		||||
            ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
 | 
			
		||||
            # Modifies `context_tokens_choice` and `ending_tokens` in
 | 
			
		||||
            # place so that the total length is less than the
 | 
			
		||||
            # specified length.  Account for [CLS], [SEP], [SEP] with
 | 
			
		||||
            # "- 3"
 | 
			
		||||
            _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3)
 | 
			
		||||
 | 
			
		||||
            tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
 | 
			
		||||
            segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
 | 
			
		||||
 | 
			
		||||
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
            input_mask = [1] * len(input_ids)
 | 
			
		||||
 | 
			
		||||
            # Zero-pad up to the sequence length.
 | 
			
		||||
            padding = [0] * (max_seq_length - len(input_ids))
 | 
			
		||||
            input_ids += padding
 | 
			
		||||
            input_mask += padding
 | 
			
		||||
            segment_ids += padding
 | 
			
		||||
 | 
			
		||||
            assert len(input_ids) == max_seq_length
 | 
			
		||||
            assert len(input_mask) == max_seq_length
 | 
			
		||||
            assert len(segment_ids) == max_seq_length
 | 
			
		||||
 | 
			
		||||
            choices_features.append((tokens, input_ids, input_mask, segment_ids))
 | 
			
		||||
 | 
			
		||||
        label = example.label
 | 
			
		||||
        if example_index < 5:
 | 
			
		||||
            logger.info("*** Example ***")
 | 
			
		||||
            logger.info("swag_id: {}".format(example.swag_id))
 | 
			
		||||
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
 | 
			
		||||
                logger.info("choice: {}".format(choice_idx))
 | 
			
		||||
                logger.info("tokens: {}".format(' '.join(tokens)))
 | 
			
		||||
                logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
 | 
			
		||||
                logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
 | 
			
		||||
                logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
 | 
			
		||||
            if is_training:
 | 
			
		||||
                logger.info("label: {}".format(label))
 | 
			
		||||
 | 
			
		||||
        features.append(
 | 
			
		||||
            InputFeatures(
 | 
			
		||||
                example_id = example.swag_id,
 | 
			
		||||
                choices_features = choices_features,
 | 
			
		||||
                label = label
 | 
			
		||||
            )
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
    return features
 | 
			
		||||
 | 
			
		||||
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
    """Truncates a sequence pair in place to the maximum length."""
 | 
			
		||||
 | 
			
		||||
    # This is a simple heuristic which will always truncate the longer sequence
 | 
			
		||||
    # one token at a time. This makes more sense than truncating an equal percent
 | 
			
		||||
    # of tokens from each, since if one sequence is very short then each token
 | 
			
		||||
    # that's truncated likely contains more information than a longer sequence.
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_length:
 | 
			
		||||
            break
 | 
			
		||||
        if len(tokens_a) > len(tokens_b):
 | 
			
		||||
            tokens_a.pop()
 | 
			
		||||
        else:
 | 
			
		||||
            tokens_b.pop()
 | 
			
		||||
 | 
			
		||||
def accuracy(out, labels):
 | 
			
		||||
    outputs = np.argmax(out, axis=1)
 | 
			
		||||
    return np.sum(outputs == labels)
 | 
			
		||||
 | 
			
		||||
def select_field(features, field):
 | 
			
		||||
    return [
 | 
			
		||||
        [
 | 
			
		||||
            choice[field]
 | 
			
		||||
            for choice in feature.choices_features
 | 
			
		||||
        ]
 | 
			
		||||
        for feature in features
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--data_dir",
 | 
			
		||||
                        default=None,
 | 
			
		||||
                        type=str,
 | 
			
		||||
                        required=True,
 | 
			
		||||
                        help="The input data dir. Should contain the .csv files (or other data files) for the task.")
 | 
			
		||||
    parser.add_argument("--bert_model", default=None, type=str, required=True,
 | 
			
		||||
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
 | 
			
		||||
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
 | 
			
		||||
                        "bert-base-multilingual-cased, bert-base-chinese.")
 | 
			
		||||
    parser.add_argument("--output_dir",
 | 
			
		||||
                        default=None,
 | 
			
		||||
                        type=str,
 | 
			
		||||
                        required=True,
 | 
			
		||||
                        help="The output directory where the model checkpoints will be written.")
 | 
			
		||||
 | 
			
		||||
    ## Other parameters
 | 
			
		||||
    parser.add_argument("--max_seq_length",
 | 
			
		||||
                        default=128,
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
 | 
			
		||||
                             "Sequences longer than this will be truncated, and sequences shorter \n"
 | 
			
		||||
                             "than this will be padded.")
 | 
			
		||||
    parser.add_argument("--do_train",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run training.")
 | 
			
		||||
    parser.add_argument("--do_eval",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run eval on the dev set.")
 | 
			
		||||
    parser.add_argument("--do_lower_case",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Set this flag if you are using an uncased model.")
 | 
			
		||||
    parser.add_argument("--train_batch_size",
 | 
			
		||||
                        default=32,
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        help="Total batch size for training.")
 | 
			
		||||
    parser.add_argument("--eval_batch_size",
 | 
			
		||||
                        default=8,
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        help="Total batch size for eval.")
 | 
			
		||||
    parser.add_argument("--learning_rate",
 | 
			
		||||
                        default=5e-5,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="The initial learning rate for Adam.")
 | 
			
		||||
    parser.add_argument("--num_train_epochs",
 | 
			
		||||
                        default=3.0,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="Total number of training epochs to perform.")
 | 
			
		||||
    parser.add_argument("--warmup_proportion",
 | 
			
		||||
                        default=0.1,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="Proportion of training to perform linear learning rate warmup for. "
 | 
			
		||||
                             "E.g., 0.1 = 10%% of training.")
 | 
			
		||||
    parser.add_argument("--no_cuda",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument("--local_rank",
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=-1,
 | 
			
		||||
                        help="local_rank for distributed training on gpus")
 | 
			
		||||
    parser.add_argument('--seed',
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=42,
 | 
			
		||||
                        help="random seed for initialization")
 | 
			
		||||
    parser.add_argument('--gradient_accumulation_steps',
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=1,
 | 
			
		||||
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument('--fp16',
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to use 16-bit float precision instead of 32-bit")
 | 
			
		||||
    parser.add_argument('--loss_scale',
 | 
			
		||||
                        type=float, default=0,
 | 
			
		||||
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
 | 
			
		||||
                             "0 (default value): dynamic loss scaling.\n"
 | 
			
		||||
                             "Positive power of 2: static loss scaling value.\n")
 | 
			
		||||
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:
 | 
			
		||||
        torch.cuda.set_device(args.local_rank)
 | 
			
		||||
        device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        n_gpu = 1
 | 
			
		||||
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
 | 
			
		||||
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
 | 
			
		||||
 | 
			
		||||
    if args.gradient_accumulation_steps < 1:
 | 
			
		||||
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
 | 
			
		||||
                            args.gradient_accumulation_steps))
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
    if not args.do_train and not args.do_eval:
 | 
			
		||||
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
 | 
			
		||||
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
 | 
			
		||||
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
 | 
			
		||||
    if not os.path.exists(args.output_dir):
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    train_examples = None
 | 
			
		||||
    num_train_optimization_steps = None
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
 | 
			
		||||
        num_train_optimization_steps = int(
 | 
			
		||||
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
 | 
			
		||||
        if args.local_rank != -1:
 | 
			
		||||
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 | 
			
		||||
 | 
			
		||||
    # Prepare model
 | 
			
		||||
    model = BertForMultipleChoice.from_pretrained(args.bert_model,
 | 
			
		||||
        cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)),
 | 
			
		||||
        num_choices=4)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        model.half()
 | 
			
		||||
    model.to(device)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.parallel import DistributedDataParallel as DDP
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
 | 
			
		||||
        model = DDP(model)
 | 
			
		||||
    elif n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer
 | 
			
		||||
    param_optimizer = list(model.named_parameters())
 | 
			
		||||
 | 
			
		||||
    # hack to remove the pooler, which is not used
    # and thus produces None gradients that break apex
 | 
			
		||||
    param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
 | 
			
		||||
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.optimizers import FP16_Optimizer
 | 
			
		||||
            from apex.optimizers import FusedAdam
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
 | 
			
		||||
        optimizer = FusedAdam(optimizer_grouped_parameters,
 | 
			
		||||
                              lr=args.learning_rate,
 | 
			
		||||
                              bias_correction=False,
 | 
			
		||||
                              max_grad_norm=1.0)
 | 
			
		||||
        if args.loss_scale == 0:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
 | 
			
		||||
        else:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
 | 
			
		||||
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
 | 
			
		||||
                                             t_total=num_train_optimization_steps)
 | 
			
		||||
    else:
 | 
			
		||||
        optimizer = BertAdam(optimizer_grouped_parameters,
 | 
			
		||||
                             lr=args.learning_rate,
 | 
			
		||||
                             warmup=args.warmup_proportion,
 | 
			
		||||
                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        train_features = convert_examples_to_features(
 | 
			
		||||
            train_examples, tokenizer, args.max_seq_length, True)
 | 
			
		||||
        logger.info("***** Running training *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(train_examples))
 | 
			
		||||
        logger.info("  Batch size = %d", args.train_batch_size)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_optimization_steps)
 | 
			
		||||
        all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
 | 
			
		||||
        all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
 | 
			
		||||
        all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
 | 
			
		||||
        all_label = torch.tensor([f.label for f in train_features], dtype=torch.long)
 | 
			
		||||
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
 | 
			
		||||
        if args.local_rank == -1:
 | 
			
		||||
            train_sampler = RandomSampler(train_data)
 | 
			
		||||
        else:
 | 
			
		||||
            train_sampler = DistributedSampler(train_data)
 | 
			
		||||
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
 | 
			
		||||
 | 
			
		||||
        model.train()
 | 
			
		||||
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
 | 
			
		||||
            tr_loss = 0
 | 
			
		||||
            nb_tr_examples, nb_tr_steps = 0, 0
 | 
			
		||||
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
 | 
			
		||||
                batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
                input_ids, input_mask, segment_ids, label_ids = batch
 | 
			
		||||
                loss = model(input_ids, segment_ids, input_mask, label_ids)
 | 
			
		||||
                if n_gpu > 1:
 | 
			
		||||
                    loss = loss.mean() # mean() to average on multi-gpu.
 | 
			
		||||
                if args.fp16 and args.loss_scale != 1.0:
 | 
			
		||||
                    # rescale loss for fp16 training
 | 
			
		||||
                    # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
 | 
			
		||||
                    loss = loss * args.loss_scale
 | 
			
		||||
                if args.gradient_accumulation_steps > 1:
 | 
			
		||||
                    loss = loss / args.gradient_accumulation_steps
 | 
			
		||||
                tr_loss += loss.item()
 | 
			
		||||
                nb_tr_examples += input_ids.size(0)
 | 
			
		||||
                nb_tr_steps += 1
 | 
			
		||||
 | 
			
		||||
                if args.fp16:
 | 
			
		||||
                    optimizer.backward(loss)
 | 
			
		||||
                else:
 | 
			
		||||
                    loss.backward()
 | 
			
		||||
                if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                    if args.fp16:
 | 
			
		||||
                        # modify the learning rate with the special warmup BERT uses
                        # if args.fp16 is False, BertAdam is used, which handles this automatically
 | 
			
		||||
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
 | 
			
		||||
                                                                                 args.warmup_proportion)
 | 
			
		||||
                        for param_group in optimizer.param_groups:
 | 
			
		||||
                            param_group['lr'] = lr_this_step
 | 
			
		||||
                    optimizer.step()
 | 
			
		||||
                    optimizer.zero_grad()
 | 
			
		||||
                    global_step += 1
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        # Save a trained model, configuration and tokenizer
 | 
			
		||||
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
 | 
			
		||||
 | 
			
		||||
        # If we save using the predefined names, we can load using `from_pretrained`
 | 
			
		||||
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
 | 
			
		||||
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
 | 
			
		||||
 | 
			
		||||
        torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
        model_to_save.config.to_json_file(output_config_file)
 | 
			
		||||
        tokenizer.save_vocabulary(args.output_dir)
 | 
			
		||||
 | 
			
		||||
        # Load a trained model and vocabulary that you have fine-tuned
 | 
			
		||||
        model = BertForMultipleChoice.from_pretrained(args.output_dir, num_choices=4)
 | 
			
		||||
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
 | 
			
		||||
    else:
 | 
			
		||||
        model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4)
 | 
			
		||||
    model.to(device)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
 | 
			
		||||
        eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True)
 | 
			
		||||
        eval_features = convert_examples_to_features(
 | 
			
		||||
            eval_examples, tokenizer, args.max_seq_length, True)
 | 
			
		||||
        logger.info("***** Running evaluation *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(eval_examples))
 | 
			
		||||
        logger.info("  Batch size = %d", args.eval_batch_size)
 | 
			
		||||
        all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long)
 | 
			
		||||
        all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long)
 | 
			
		||||
        all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long)
 | 
			
		||||
        all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long)
 | 
			
		||||
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
 | 
			
		||||
        # Run prediction for full data
 | 
			
		||||
        eval_sampler = SequentialSampler(eval_data)
 | 
			
		||||
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
 | 
			
		||||
 | 
			
		||||
        model.eval()
 | 
			
		||||
        eval_loss, eval_accuracy = 0, 0
 | 
			
		||||
        nb_eval_steps, nb_eval_examples = 0, 0
 | 
			
		||||
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
 | 
			
		||||
            input_ids = input_ids.to(device)
 | 
			
		||||
            input_mask = input_mask.to(device)
 | 
			
		||||
            segment_ids = segment_ids.to(device)
 | 
			
		||||
            label_ids = label_ids.to(device)
 | 
			
		||||
 | 
			
		||||
            with torch.no_grad():
 | 
			
		||||
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
 | 
			
		||||
                logits = model(input_ids, segment_ids, input_mask)
 | 
			
		||||
 | 
			
		||||
            logits = logits.detach().cpu().numpy()
 | 
			
		||||
            label_ids = label_ids.to('cpu').numpy()
 | 
			
		||||
            tmp_eval_accuracy = accuracy(logits, label_ids)
 | 
			
		||||
 | 
			
		||||
            eval_loss += tmp_eval_loss.mean().item()
 | 
			
		||||
            eval_accuracy += tmp_eval_accuracy
 | 
			
		||||
 | 
			
		||||
            nb_eval_examples += input_ids.size(0)
 | 
			
		||||
            nb_eval_steps += 1
 | 
			
		||||
 | 
			
		||||
        eval_loss = eval_loss / nb_eval_steps
 | 
			
		||||
        eval_accuracy = eval_accuracy / nb_eval_examples
 | 
			
		||||
 | 
			
		||||
        result = {'eval_loss': eval_loss,
 | 
			
		||||
                  'eval_accuracy': eval_accuracy,
 | 
			
		||||
                  'global_step': global_step,
 | 
			
		||||
                  'loss': tr_loss/nb_tr_steps}
 | 
			
		||||
 | 
			
		||||
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
 | 
			
		||||
        with open(output_eval_file, "w") as writer:
 | 
			
		||||
            logger.info("***** Eval results *****")
 | 
			
		||||
            for key in sorted(result.keys()):
 | 
			
		||||
                logger.info("  %s = %s", key, str(result[key]))
 | 
			
		||||
                writer.write("%s = %s\n" % (key, str(result[key])))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    main()
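Note on the fp16 branch above: run_swag.py rescales the learning rate by hand using a linear warmup schedule. The sketch below is not part of the diff and only approximates the shape of WarmupLinearSchedule (the library's exact formula may differ); it is included to make the warmup_proportion argument concrete.

# Minimal sketch of a warm-up-then-linear-decay learning-rate schedule (assumption: this
# mirrors the general shape used above, not the library's exact implementation).
def linear_warmup_lr(base_lr, step, total_steps, warmup_proportion=0.1):
    progress = step / max(1, total_steps)
    if progress < warmup_proportion:
        return base_lr * progress / warmup_proportion                          # ramp up
    return base_lr * max(0.0, (1.0 - progress) / (1.0 - warmup_proportion))    # decay to zero

# e.g. base_lr=5e-5, total_steps=1000: step 50 -> 2.5e-05, step 100 -> 5e-05, step 550 -> 2.5e-05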
 | 
			
		||||
							
								
								
									
examples/run_tf_glue.py (new file, +48)
@@ -0,0 +1,48 @@
 | 
			
		||||
import tensorflow as tf
 | 
			
		||||
import tensorflow_datasets
 | 
			
		||||
from transformers import *
 | 
			
		||||
 | 
			
		||||
# Load dataset, tokenizer, model from pretrained model/vocabulary
 | 
			
		||||
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
 | 
			
		||||
model = TFBertForSequenceClassification.from_pretrained('bert-base-cased')
 | 
			
		||||
data = tensorflow_datasets.load('glue/mrpc')
 | 
			
		||||
 | 
			
		||||
# Prepare dataset for GLUE as a tf.data.Dataset instance
 | 
			
		||||
train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, 128, 'mrpc')
 | 
			
		||||
valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, 128, 'mrpc')
 | 
			
		||||
train_dataset = train_dataset.shuffle(100).batch(32).repeat(2)
 | 
			
		||||
valid_dataset = valid_dataset.batch(64)
 | 
			
		||||
 | 
			
		||||
# Prepare training: Compile tf.keras model with optimizer, loss and learning rate schedule 
 | 
			
		||||
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
 | 
			
		||||
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
 | 
			
		||||
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
 | 
			
		||||
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
 | 
			
		||||
 | 
			
		||||
# Train and evaluate using tf.keras.Model.fit()
 | 
			
		||||
history = model.fit(train_dataset, epochs=2, steps_per_epoch=115,
 | 
			
		||||
                    validation_data=valid_dataset, validation_steps=7)
 | 
			
		||||
 | 
			
		||||
# Expected output:
# >>> Train for 115 steps, validate for 7 steps
# >>> Epoch 1/2
# >>> 115/115 [==============================] - 53s 459ms/step - loss: 0.6033 - accuracy: 0.6712 - val_loss: 0.4964 - val_accuracy: 0.7647
# >>> Epoch 2/2
# >>> 115/115 [==============================] - 33s 289ms/step - loss: 0.4141 - accuracy: 0.8160 - val_loss: 0.3914 - val_accuracy: 0.8382
 | 
			
		||||
 | 
			
		||||
# Load the TensorFlow model in PyTorch for inspection
 | 
			
		||||
model.save_pretrained('./save/')
 | 
			
		||||
pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf=True)
 | 
			
		||||
 | 
			
		||||
# Quickly test a few predictions - MRPC is a paraphrasing task, let's see if our model learned the task
 | 
			
		||||
sentence_0 = "This research was consistent with his findings."
 | 
			
		||||
sentence_1 = "His findings were compatible with this research."
 | 
			
		||||
sentence_2 = "His findings were not compatible with this research."
 | 
			
		||||
inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt')
 | 
			
		||||
inputs_2 = tokenizer.encode_plus(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt')
 | 
			
		||||
 | 
			
		||||
pred_1 = pytorch_model(**inputs_1)[0].argmax().item()
 | 
			
		||||
pred_2 = pytorch_model(**inputs_2)[0].argmax().item()
 | 
			
		||||
print("sentence_1 is", "a paraphrase" if pred_1 else "not a paraphrase", "of sentence_0")
 | 
			
		||||
print("sentence_2 is", "a paraphrase" if pred_2 else "not a paraphrase", "of sentence_0")
 | 
			
		||||
# Expected output:
# >>> sentence_1 is a paraphrase of sentence_0
# >>> sentence_2 is not a paraphrase of sentence_0
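The PyTorch round-trip above is only for inspection. A hedged sketch of running the same paraphrase check directly on the TensorFlow model is shown below; it assumes this version's encode_plus also accepts return_tensors='tf', which the diff itself does not show.

# Sketch only: the same check on the TF side, assuming encode_plus(..., return_tensors='tf') is supported.
import tensorflow as tf
tf_inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='tf')
tf_pred_1 = int(tf.math.argmax(model(tf_inputs_1)[0], axis=1).numpy()[0])
print("sentence_1 is", "a paraphrase" if tf_pred_1 else "not a paraphrase", "of sentence_0")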
 | 
			
		||||
							
								
								
									
examples/test_examples.py (new file, +111)
@@ -0,0 +1,111 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 HuggingFace Inc..
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
from __future__ import absolute_import
 | 
			
		||||
from __future__ import division
 | 
			
		||||
from __future__ import print_function
 | 
			
		||||
 | 
			
		||||
import sys
 | 
			
		||||
import unittest
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    # python 3.4+ can use builtin unittest.mock instead of mock package
 | 
			
		||||
    from unittest.mock import patch
 | 
			
		||||
except ImportError:
 | 
			
		||||
    from mock import patch
 | 
			
		||||
 | 
			
		||||
import run_glue
 | 
			
		||||
import run_squad
 | 
			
		||||
import run_generation
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(level=logging.DEBUG)
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger()
 | 
			
		||||
 | 
			
		||||
def get_setup_file():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    parser.add_argument('-f')
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    return args.f
 | 
			
		||||
 | 
			
		||||
class ExamplesTests(unittest.TestCase):
 | 
			
		||||
 | 
			
		||||
    def test_run_glue(self):
 | 
			
		||||
        stream_handler = logging.StreamHandler(sys.stdout)
 | 
			
		||||
        logger.addHandler(stream_handler)
 | 
			
		||||
 | 
			
		||||
        testargs = ["run_glue.py",
 | 
			
		||||
                    "--data_dir=./examples/tests_samples/MRPC/",
 | 
			
		||||
                    "--task_name=mrpc",
 | 
			
		||||
                    "--do_train",
 | 
			
		||||
                    "--do_eval",
 | 
			
		||||
                    "--output_dir=./examples/tests_samples/temp_dir",
 | 
			
		||||
                    "--per_gpu_train_batch_size=2",
 | 
			
		||||
                    "--per_gpu_eval_batch_size=1",
 | 
			
		||||
                    "--learning_rate=1e-4",
 | 
			
		||||
                    "--max_steps=10",
 | 
			
		||||
                    "--warmup_steps=2",
 | 
			
		||||
                    "--overwrite_output_dir",
 | 
			
		||||
                    "--seed=42"]
 | 
			
		||||
        model_type, model_name = ("--model_type=bert",
 | 
			
		||||
                                  "--model_name_or_path=bert-base-uncased")
 | 
			
		||||
        with patch.object(sys, 'argv', testargs + [model_type, model_name]):
 | 
			
		||||
            result = run_glue.main()
 | 
			
		||||
            for value in result.values():
 | 
			
		||||
                self.assertGreaterEqual(value, 0.75)
 | 
			
		||||
 | 
			
		||||
    def test_run_squad(self):
 | 
			
		||||
        stream_handler = logging.StreamHandler(sys.stdout)
 | 
			
		||||
        logger.addHandler(stream_handler)
 | 
			
		||||
 | 
			
		||||
        testargs = ["run_squad.py",
 | 
			
		||||
                    "--train_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json",
 | 
			
		||||
                    "--predict_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json",
 | 
			
		||||
                    "--model_name=bert-base-uncased",
 | 
			
		||||
                    "--output_dir=./examples/tests_samples/temp_dir",
 | 
			
		||||
                    "--max_steps=10",
 | 
			
		||||
                    "--warmup_steps=2",
 | 
			
		||||
                    "--do_train",
 | 
			
		||||
                    "--do_eval",
 | 
			
		||||
                    "--version_2_with_negative",
 | 
			
		||||
                    "--learning_rate=2e-4",
 | 
			
		||||
                    "--per_gpu_train_batch_size=2",
 | 
			
		||||
                    "--per_gpu_eval_batch_size=1",
 | 
			
		||||
                    "--overwrite_output_dir",
 | 
			
		||||
                    "--seed=42"]
 | 
			
		||||
        model_type, model_name = ("--model_type=bert",
 | 
			
		||||
                                  "--model_name_or_path=bert-base-uncased")
 | 
			
		||||
        with patch.object(sys, 'argv', testargs + [model_type, model_name]):
 | 
			
		||||
            result = run_squad.main()
 | 
			
		||||
            self.assertGreaterEqual(result['f1'], 30)
 | 
			
		||||
            self.assertGreaterEqual(result['exact'], 30)
 | 
			
		||||
 | 
			
		||||
    def test_generation(self):
 | 
			
		||||
        stream_handler = logging.StreamHandler(sys.stdout)
 | 
			
		||||
        logger.addHandler(stream_handler)
 | 
			
		||||
 | 
			
		||||
        testargs = ["run_generation.py",
 | 
			
		||||
                    "--prompt=Hello",
 | 
			
		||||
                    "--length=10",
 | 
			
		||||
                    "--seed=42"]
 | 
			
		||||
        model_type, model_name = ("--model_type=openai-gpt",
 | 
			
		||||
                                  "--model_name_or_path=openai-gpt")
 | 
			
		||||
        with patch.object(sys, 'argv', testargs + [model_type, model_name]):
 | 
			
		||||
            result = run_generation.main()
 | 
			
		||||
            self.assertGreaterEqual(len(result), 10)
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    unittest.main()
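For reference, the sketch below (not part of the diff) runs one of these example tests programmatically instead of through the unittest CLI; it assumes examples/ is the working directory or on sys.path so the imports above resolve.

# Sketch only: run a single example test programmatically.
import unittest
from test_examples import ExamplesTests  # assumes examples/ is on sys.path

suite = unittest.TestSuite()
suite.addTest(ExamplesTests("test_generation"))
unittest.TextTestRunner(verbosity=2).run(suite)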
 | 
			
		||||
							
								
								
									
examples/tests_samples/.gitignore (new file, vendored, +6)
@@ -0,0 +1,6 @@
 | 
			
		||||
*.*
 | 
			
		||||
cache*
 | 
			
		||||
temp*
 | 
			
		||||
!*.tsv
 | 
			
		||||
!*.json
 | 
			
		||||
!.gitignore
 | 
			
		||||
							
								
								
									
examples/tests_samples/MRPC/dev.tsv (new file, +7)
@@ -0,0 +1,7 @@
 | 
			
		||||
Quality	#1 ID	#2 ID	#1 String	#2 String
 | 
			
		||||
1	1355540	1355592	He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .	" The foodservice pie business does not fit our long-term growth strategy .
 | 
			
		||||
0	2029631	2029565	Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .	His wife said he was " 100 percent behind George Bush " and looked forward to using his years of training in the war .
 | 
			
		||||
0	487993	487952	The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .	The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .
 | 
			
		||||
1	1989515	1989458	The AFL-CIO is waiting until October to decide if it will endorse a candidate .	The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
 | 
			
		||||
0	1783137	1782659	No dates have been set for the civil or the criminal trial .	No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .
 | 
			
		||||
1	3039165	3039036	Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .	It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
 | 
			
		||||
							
								
								
									
examples/tests_samples/MRPC/train.tsv (new file, +7)
@@ -0,0 +1,7 @@
 | 
			
		||||
Quality	#1 ID	#2 ID	#1 String	#2 String
 | 
			
		||||
1	1355540	1355592	He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .	" The foodservice pie business does not fit our long-term growth strategy .
 | 
			
		||||
0	2029631	2029565	Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .	His wife said he was " 100 percent behind George Bush " and looked forward to using his years of training in the war .
 | 
			
		||||
0	487993	487952	The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .	The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .
 | 
			
		||||
1	1989515	1989458	The AFL-CIO is waiting until October to decide if it will endorse a candidate .	The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
 | 
			
		||||
0	1783137	1782659	No dates have been set for the civil or the criminal trial .	No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .
 | 
			
		||||
1	3039165	3039036	Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .	It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
 | 
			
		||||
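The two MRPC samples above share the same layout. The sketch below is not part of the diff; it simply shows how the columns line up when the sample is read as a tab-separated file, with the quality label in column 0 and the sentence pair in columns 3 and 4 (the path is the one used by test_run_glue).

# Sketch only: reading the sample MRPC file above; columns are Quality, #1 ID, #2 ID, #1 String, #2 String.
import csv
with open("./examples/tests_samples/MRPC/train.tsv", encoding="utf-8") as f:
    rows = list(csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE))
for label, _id1, _id2, s1, s2 in rows[1:]:
    print(label, s1[:40], "...", s2[:40])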
							
								
								
									
examples/tests_samples/SQUAD/dev-v2.0-small.json (new file, +140)
@@ -0,0 +1,140 @@
 | 
			
		||||
{
 | 
			
		||||
    "version": "v2.0",
 | 
			
		||||
    "data": [{
 | 
			
		||||
        "title": "Normans",
 | 
			
		||||
        "paragraphs": [{
 | 
			
		||||
            "qas": [{
 | 
			
		||||
                "question": "In what country is Normandy located?",
 | 
			
		||||
                "id": "56ddde6b9a695914005b9628",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "France",
 | 
			
		||||
                    "answer_start": 159
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "question": "When were the Normans in Normandy?",
 | 
			
		||||
                "id": "56ddde6b9a695914005b9629",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "10th and 11th centuries",
 | 
			
		||||
                    "answer_start": 94
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "question": "From which countries did the Norse originate?",
 | 
			
		||||
                "id": "56ddde6b9a695914005b962a",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "Denmark, Iceland and Norway",
 | 
			
		||||
                    "answer_start": 256
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "plausible_answers": [{
 | 
			
		||||
                    "text": "Rollo",
 | 
			
		||||
                    "answer_start": 308
 | 
			
		||||
                }],
 | 
			
		||||
                "question": "Who did King Charles III swear fealty to?",
 | 
			
		||||
                "id": "5ad39d53604f3c001a3fe8d3",
 | 
			
		||||
                "answers": [],
 | 
			
		||||
                "is_impossible": true
 | 
			
		||||
            }, {
 | 
			
		||||
                "plausible_answers": [{
 | 
			
		||||
                    "text": "10th century",
 | 
			
		||||
                    "answer_start": 671
 | 
			
		||||
                }],
 | 
			
		||||
                "question": "When did the Frankish identity emerge?",
 | 
			
		||||
                "id": "5ad39d53604f3c001a3fe8d4",
 | 
			
		||||
                "answers": [],
 | 
			
		||||
                "is_impossible": true
 | 
			
		||||
            }],
 | 
			
		||||
            "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries."
 | 
			
		||||
        }, {
 | 
			
		||||
            "qas": [{
 | 
			
		||||
                "question": "Who was the duke in the battle of Hastings?",
 | 
			
		||||
                "id": "56dddf4066d3e219004dad5f",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "William the Conqueror",
 | 
			
		||||
                    "answer_start": 1022
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "plausible_answers": [{
 | 
			
		||||
                    "text": "Antioch",
 | 
			
		||||
                    "answer_start": 1295
 | 
			
		||||
                }],
 | 
			
		||||
                "question": "What principality did William the conquerer found?",
 | 
			
		||||
                "id": "5ad3a266604f3c001a3fea2b",
 | 
			
		||||
                "answers": [],
 | 
			
		||||
                "is_impossible": true
 | 
			
		||||
            }],
 | 
			
		||||
            "context": "The Norman dynasty had a major political, cultural and military impact on medieval Europe and even the Near East. The Normans were famed for their martial spirit and eventually for their Christian piety, becoming exponents of the Catholic orthodoxy into which they assimilated. They adopted the Gallo-Romance language of the Frankish land they settled, their dialect becoming known as Norman, Normaund or Norman French, an important literary language. The Duchy of Normandy, which they formed by treaty with the French crown, was a great fief of medieval France, and under Richard I of Normandy was forged into a cohesive and formidable principality in feudal tenure. The Normans are noted both for their culture, such as their unique Romanesque architecture and musical traditions, and for their significant military accomplishments and innovations. Norman adventurers founded the Kingdom of Sicily under Roger II after conquering southern Italy on the Saracens and Byzantines, and an expedition on behalf of their duke, William the Conqueror, led to the Norman conquest of England at the Battle of Hastings in 1066. Norman cultural and military influence spread from these new European centres to the Crusader states of the Near East, where their prince Bohemond I founded the Principality of Antioch in the Levant, to Scotland and Wales in Great Britain, to Ireland, and to the coasts of north Africa and the Canary Islands."
 | 
			
		||||
        }]
 | 
			
		||||
    }, {
 | 
			
		||||
        "title": "Computational_complexity_theory",
 | 
			
		||||
        "paragraphs": [{
 | 
			
		||||
            "qas": [{
 | 
			
		||||
                "question": "What branch of theoretical computer science deals with broadly classifying computational problems by difficulty and class of relationship?",
 | 
			
		||||
                "id": "56e16182e3433e1400422e28",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "Computational complexity theory",
 | 
			
		||||
                    "answer_start": 0
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "plausible_answers": [{
 | 
			
		||||
                    "text": "algorithm",
 | 
			
		||||
                    "answer_start": 472
 | 
			
		||||
                }],
 | 
			
		||||
                "question": "What is a manual application of mathematical steps?",
 | 
			
		||||
                "id": "5ad5316b5b96ef001a10ab76",
 | 
			
		||||
                "answers": [],
 | 
			
		||||
                "is_impossible": true
 | 
			
		||||
            }],
 | 
			
		||||
            "context": "Computational complexity theory is a branch of the theory of computation in theoretical computer science that focuses on classifying computational problems according to their inherent difficulty, and relating those classes to each other. A computational problem is understood to be a task that is in principle amenable to being solved by a computer, which is equivalent to stating that the problem may be solved by mechanical application of mathematical steps, such as an algorithm."
 | 
			
		||||
        }, {
 | 
			
		||||
            "qas": [{
 | 
			
		||||
                "question": "What measure of a computational problem broadly defines the inherent difficulty of the solution?",
 | 
			
		||||
                "id": "56e16839cd28a01900c67887",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "if its solution requires significant resources",
 | 
			
		||||
                    "answer_start": 46
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "question": "What method is used to intuitively assess or quantify the amount of resources required to solve a computational problem?",
 | 
			
		||||
                "id": "56e16839cd28a01900c67888",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "mathematical models of computation",
 | 
			
		||||
                    "answer_start": 176
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "question": "What are two basic primary resources used to guage complexity?",
 | 
			
		||||
                "id": "56e16839cd28a01900c67889",
 | 
			
		||||
                "answers": [{
 | 
			
		||||
                    "text": "time and storage",
 | 
			
		||||
                    "answer_start": 305
 | 
			
		||||
                }],
 | 
			
		||||
                "is_impossible": false
 | 
			
		||||
            }, {
 | 
			
		||||
                "plausible_answers": [{
 | 
			
		||||
                    "text": "the number of gates in a circuit",
 | 
			
		||||
                    "answer_start": 436
 | 
			
		||||
                }],
 | 
			
		||||
                "question": "What unit is measured to determine circuit simplicity?",
 | 
			
		||||
                "id": "5ad532575b96ef001a10ab7f",
 | 
			
		||||
                "answers": [],
 | 
			
		||||
                "is_impossible": true
 | 
			
		||||
            }, {
 | 
			
		||||
                "plausible_answers": [{
 | 
			
		||||
                    "text": "the number of processors",
 | 
			
		||||
                    "answer_start": 502
 | 
			
		||||
                }],
 | 
			
		||||
                "question": "What number is used in perpendicular computing?",
 | 
			
		||||
                "id": "5ad532575b96ef001a10ab80",
 | 
			
		||||
                "answers": [],
 | 
			
		||||
                "is_impossible": true
 | 
			
		||||
            }],
 | 
			
		||||
            "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do."
 | 
			
		||||
        }]
 | 
			
		||||
    }]
 | 
			
		||||
}
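A quick way to sanity-check the small SQuAD v2.0 sample above is sketched below (not part of the diff); it counts the questions and how many are marked unanswerable, using the same path as test_run_squad.

# Sketch only: count questions and unanswerable questions in the sample file above.
import json
with open("./examples/tests_samples/SQUAD/dev-v2.0-small.json", encoding="utf-8") as f:
    squad = json.load(f)
qas = [qa for article in squad["data"] for para in article["paragraphs"] for qa in para["qas"]]
print(len(qas), "questions,", sum(qa["is_impossible"] for qa in qas), "unanswerable")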
 | 
			
		||||
							
								
								
									
examples/utils_multiple_choice.py (new file, +463)
@@ -0,0 +1,463 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" BERT multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension  """
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
import json
 | 
			
		||||
import csv
 | 
			
		||||
import glob
 | 
			
		||||
import tqdm
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputExample(object):
 | 
			
		||||
    """A single training/test example for multiple choice"""
 | 
			
		||||
 | 
			
		||||
    def __init__(self, example_id, question,  contexts, endings, label=None):
 | 
			
		||||
        """Constructs a InputExample.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            example_id: Unique id for the example.
 | 
			
		||||
            contexts: list of str. The untokenized text of the first sequence (context of corresponding question).
 | 
			
		||||
            question: string. The untokenized text of the second sequence (question).
 | 
			
		||||
            endings: list of str. multiple choice's options. Its length must be equal to contexts' length.
 | 
			
		||||
            label: (Optional) string. The label of the example. This should be
 | 
			
		||||
            specified for train and dev examples, but not for test examples.
 | 
			
		||||
        """
 | 
			
		||||
        self.example_id = example_id
 | 
			
		||||
        self.question = question
 | 
			
		||||
        self.contexts = contexts
 | 
			
		||||
        self.endings = endings
 | 
			
		||||
        self.label = label
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputFeatures(object):
 | 
			
		||||
    def __init__(self,
 | 
			
		||||
                 example_id,
 | 
			
		||||
                 choices_features,
 | 
			
		||||
                 label
 | 
			
		||||
 | 
			
		||||
    ):
 | 
			
		||||
        self.example_id = example_id
 | 
			
		||||
        self.choices_features = [
 | 
			
		||||
            {
 | 
			
		||||
                'input_ids': input_ids,
 | 
			
		||||
                'input_mask': input_mask,
 | 
			
		||||
                'segment_ids': segment_ids
 | 
			
		||||
            }
 | 
			
		||||
            for _, input_ids, input_mask, segment_ids in choices_features
 | 
			
		||||
        ]
 | 
			
		||||
        self.label = label
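# Illustrative note (not part of the original file): one InputExample carries the four
# (context, ending) pairs for a single question, and the resulting InputFeatures keeps one
# encoded dict per choice, e.g. (made-up text, for illustration only):
#
#     example = InputExample(
#         example_id="swag-0",
#         question="He is throwing darts at a wall.",
#         contexts=[ctx, ctx, ctx, ctx],      # the same context repeated once per choice
#         endings=["ending A", "ending B", "ending C", "ending D"],
#         label="2")
#
# convert_examples_to_features() below turns each (context, question + ending) pair into
# input_ids / input_mask / segment_ids, so len(features.choices_features) == 4.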
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class DataProcessor(object):
 | 
			
		||||
    """Base class for data converters for multiple choice data sets."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """Gets a collection of `InputExample`s for the train set."""
 | 
			
		||||
        raise NotImplementedError()
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """Gets a collection of `InputExample`s for the dev set."""
 | 
			
		||||
        raise NotImplementedError()
 | 
			
		||||
 | 
			
		||||
    def get_test_examples(self, data_dir):
 | 
			
		||||
        """Gets a collection of `InputExample`s for the test set."""
 | 
			
		||||
        raise NotImplementedError()
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """Gets the list of labels for this data set."""
 | 
			
		||||
        raise NotImplementedError()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class RaceProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the RACE data set."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} train".format(data_dir))
 | 
			
		||||
        high = os.path.join(data_dir, 'train/high')
 | 
			
		||||
        middle = os.path.join(data_dir, 'train/middle')
 | 
			
		||||
        high = self._read_txt(high)
 | 
			
		||||
        middle = self._read_txt(middle)
 | 
			
		||||
        return self._create_examples(high + middle, 'train')
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} dev".format(data_dir))
 | 
			
		||||
        high = os.path.join(data_dir, 'dev/high')
 | 
			
		||||
        middle = os.path.join(data_dir, 'dev/middle')
 | 
			
		||||
        high = self._read_txt(high)
 | 
			
		||||
        middle = self._read_txt(middle)
 | 
			
		||||
        return self._create_examples(high + middle, 'dev')
 | 
			
		||||
 | 
			
		||||
    def get_test_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} test".format(data_dir))
 | 
			
		||||
        high = os.path.join(data_dir, 'test/high')
 | 
			
		||||
        middle = os.path.join(data_dir, 'test/middle')
 | 
			
		||||
        high = self._read_txt(high)
 | 
			
		||||
        middle = self._read_txt(middle)
 | 
			
		||||
        return self._create_examples(high + middle, 'test')
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["0", "1", "2", "3"]
 | 
			
		||||
 | 
			
		||||
    def _read_txt(self, input_dir):
 | 
			
		||||
        lines = []
 | 
			
		||||
        files = glob.glob(input_dir + "/*txt")
 | 
			
		||||
        for file in tqdm.tqdm(files, desc="read files"):
 | 
			
		||||
            with open(file, 'r', encoding='utf-8') as fin:
 | 
			
		||||
                data_raw = json.load(fin)
 | 
			
		||||
                data_raw["race_id"] = file
 | 
			
		||||
                lines.append(data_raw)
 | 
			
		||||
        return lines
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (_, data_raw) in enumerate(lines):
 | 
			
		||||
            race_id = "%s-%s" % (set_type, data_raw["race_id"])
 | 
			
		||||
            article = data_raw["article"]
 | 
			
		||||
            for i in range(len(data_raw["answers"])):
 | 
			
		||||
                truth = str(ord(data_raw['answers'][i]) - ord('A'))
 | 
			
		||||
                question = data_raw['questions'][i]
 | 
			
		||||
                options = data_raw['options'][i]
 | 
			
		||||
 | 
			
		||||
                examples.append(
 | 
			
		||||
                    InputExample(
 | 
			
		||||
                        example_id=race_id,
 | 
			
		||||
                        question=question,
 | 
			
		||||
                        contexts=[article, article, article, article], # this is not efficient but convenient
 | 
			
		||||
                        endings=[options[0], options[1], options[2], options[3]],
 | 
			
		||||
                        label=truth))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
class SwagProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the SWAG data set."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} train".format(data_dir))
 | 
			
		||||
        return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} dev".format(data_dir))
 | 
			
		||||
        return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_test_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} dev".format(data_dir))
 | 
			
		||||
        raise ValueError(
 | 
			
		||||
            "For swag testing, the input file does not contain a label column. It can not be tested in current code"
 | 
			
		||||
            "setting!"
 | 
			
		||||
        )
 | 
			
		||||
        return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test")
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["0", "1", "2", "3"]
 | 
			
		||||
 | 
			
		||||
    def _read_csv(self, input_file):
 | 
			
		||||
        with open(input_file, 'r', encoding='utf-8') as f:
 | 
			
		||||
            reader = csv.reader(f)
 | 
			
		||||
            lines = []
 | 
			
		||||
            for line in reader:
 | 
			
		||||
                if sys.version_info[0] == 2:
 | 
			
		||||
                    line = list(unicode(cell, 'utf-8') for cell in line)
 | 
			
		||||
                lines.append(line)
 | 
			
		||||
            return lines
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        if type == "train" and lines[0][-1] != 'label':
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "For training, the input file must contain a label column."
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        examples = [
 | 
			
		||||
            InputExample(
 | 
			
		||||
                example_id=line[2],
 | 
			
		||||
                question=line[5],  # in the swag dataset, the
 | 
			
		||||
                # common beginning of each
 | 
			
		||||
                # choice is stored in "sent2".
 | 
			
		||||
                contexts = [line[4], line[4], line[4], line[4]],
 | 
			
		||||
                endings = [line[7], line[8], line[9], line[10]],
 | 
			
		||||
                label=line[11]
 | 
			
		||||
            ) for line in lines[1:]  # we skip the line with the column names
 | 
			
		||||
        ]
 | 
			
		||||
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ArcProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the ARC data set (request from allennlp)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} train".format(data_dir))
 | 
			
		||||
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        logger.info("LOOKING AT {} dev".format(data_dir))
 | 
			
		||||
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_test_examples(self, data_dir):
 | 
			
		||||
        logger.info("LOOKING AT {} test".format(data_dir))
 | 
			
		||||
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["0", "1", "2", "3"]
 | 
			
		||||
 | 
			
		||||
    def _read_json(self, input_file):
 | 
			
		||||
        with open(input_file, 'r', encoding='utf-8') as fin:
 | 
			
		||||
            lines = fin.readlines()
 | 
			
		||||
            return lines
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
 | 
			
		||||
        #There are two types of labels. They should be normalized
 | 
			
		||||
        def normalize(truth):
 | 
			
		||||
            if truth in "ABCD":
 | 
			
		||||
                return ord(truth) - ord("A")
 | 
			
		||||
            elif truth in "1234":
 | 
			
		||||
                return int(truth) - 1
 | 
			
		||||
            else:
 | 
			
		||||
                logger.info("truth ERROR! %s", str(truth))
 | 
			
		||||
                return None
 | 
			
		||||
 | 
			
		||||
        examples = []
 | 
			
		||||
        three_choice = 0
 | 
			
		||||
        four_choice = 0
 | 
			
		||||
        five_choice = 0
 | 
			
		||||
        other_choices = 0
 | 
			
		||||
        # we drop examples that have more or fewer than four choices
 | 
			
		||||
        for line in tqdm.tqdm(lines, desc="read arc data"):
 | 
			
		||||
            data_raw = json.loads(line.strip("\n"))
 | 
			
		||||
            if len(data_raw["question"]["choices"]) == 3:
 | 
			
		||||
                three_choice += 1
 | 
			
		||||
                continue
 | 
			
		||||
            elif len(data_raw["question"]["choices"]) == 5:
 | 
			
		||||
                five_choice += 1
 | 
			
		||||
                continue
 | 
			
		||||
            elif len(data_raw["question"]["choices"]) != 4:
 | 
			
		||||
                other_choices += 1
 | 
			
		||||
                continue
 | 
			
		||||
            four_choice += 1
 | 
			
		||||
            truth = str(normalize(data_raw["answerKey"]))
 | 
			
		||||
            assert truth != "None"
 | 
			
		||||
            question_choices = data_raw["question"]
 | 
			
		||||
            question = question_choices["stem"]
 | 
			
		||||
            id = data_raw["id"]
 | 
			
		||||
            options = question_choices["choices"]
 | 
			
		||||
            if len(options) == 4:
 | 
			
		||||
                examples.append(
 | 
			
		||||
                    InputExample(
 | 
			
		||||
                        example_id = id,
 | 
			
		||||
                        question=question,
 | 
			
		||||
                        contexts=[options[0]["para"].replace("_", ""), options[1]["para"].replace("_", ""),
 | 
			
		||||
                                  options[2]["para"].replace("_", ""), options[3]["para"].replace("_", "")],
 | 
			
		||||
                        endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]],
 | 
			
		||||
                        label=truth))
 | 
			
		||||
 | 
			
		||||
        if type == "train":
 | 
			
		||||
            assert len(examples) > 1
 | 
			
		||||
            assert examples[0].label is not None
 | 
			
		||||
        logger.info("len examples: %s}", str(len(examples)))
 | 
			
		||||
        logger.info("Three choices: %s", str(three_choice))
 | 
			
		||||
        logger.info("Five choices: %s", str(five_choice))
 | 
			
		||||
        logger.info("Other choices: %s", str(other_choices))
 | 
			
		||||
        logger.info("four choices: %s", str(four_choice))
 | 
			
		||||
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_examples_to_features(examples, label_list, max_seq_length,
 | 
			
		||||
                                 tokenizer,
 | 
			
		||||
                                 cls_token_at_end=False,
 | 
			
		||||
                                 cls_token='[CLS]',
 | 
			
		||||
                                 cls_token_segment_id=1,
 | 
			
		||||
                                 sep_token='[SEP]',
 | 
			
		||||
                                 sequence_a_segment_id=0,
 | 
			
		||||
                                 sequence_b_segment_id=1,
 | 
			
		||||
                                 sep_token_extra=False,
 | 
			
		||||
                                 pad_token_segment_id=0,
 | 
			
		||||
                                 pad_on_left=False,
 | 
			
		||||
                                 pad_token=0,
 | 
			
		||||
                                 mask_padding_with_zero=True):
 | 
			
		||||
    """ Loads a data file into a list of `InputBatch`s
 | 
			
		||||
        `cls_token_at_end` defines the location of the CLS token:
 | 
			
		||||
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
 | 
			
		||||
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
 | 
			
		||||
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    label_map = {label : i for i, label in enumerate(label_list)}
 | 
			
		||||
 | 
			
		||||
    features = []
 | 
			
		||||
    for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
 | 
			
		||||
        if ex_index % 10000 == 0:
 | 
			
		||||
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
 | 
			
		||||
        choices_features = []
 | 
			
		||||
        for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)):
 | 
			
		||||
            tokens_a = tokenizer.tokenize(context)
 | 
			
		||||
            tokens_b = None
 | 
			
		||||
            if example.question.find("_") != -1:
 | 
			
		||||
                # this is a cloze-style question
 | 
			
		||||
                tokens_b = tokenizer.tokenize(example.question.replace("_", ending))
 | 
			
		||||
            else:
 | 
			
		||||
                tokens_b = tokenizer.tokenize(example.question + " " + ending)
 | 
			
		||||
                # you can add a sep token between question and ending. This does not make much difference.
 | 
			
		||||
                # tokens_b = tokenizer.tokenize(example.question)
 | 
			
		||||
                # tokens_b += [sep_token]
 | 
			
		||||
                # if sep_token_extra:
 | 
			
		||||
                #     tokens_b += [sep_token]
 | 
			
		||||
                # tokens_b += tokenizer.tokenize(ending)
 | 
			
		||||
 | 
			
		||||
            special_tokens_count = 4 if sep_token_extra else 3
 | 
			
		||||
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
 | 
			
		||||
 | 
			
		||||
            # The convention in BERT is:
 | 
			
		||||
            # (a) For sequence pairs:
 | 
			
		||||
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
 | 
			
		||||
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
 | 
			
		||||
            # (b) For single sequences:
 | 
			
		||||
            #  tokens:   [CLS] the dog is hairy . [SEP]
 | 
			
		||||
            #  type_ids:   0   0   0   0  0     0   0
 | 
			
		||||
            #
 | 
			
		||||
            # Where "type_ids" are used to indicate whether this is the first
 | 
			
		||||
            # sequence or the second sequence. The embedding vectors for `type=0` and
 | 
			
		||||
            # `type=1` were learned during pre-training and are added to the wordpiece
 | 
			
		||||
            # embedding vector (and position vector). This is not *strictly* necessary
 | 
			
		||||
            # since the [SEP] token unambiguously separates the sequences, but it makes
 | 
			
		||||
            # it easier for the model to learn the concept of sequences.
 | 
			
		||||
            #
 | 
			
		||||
            # For classification tasks, the first vector (corresponding to [CLS]) is
 | 
			
		||||
            # used as the "sentence vector". Note that this only makes sense because
 | 
			
		||||
            # the entire model is fine-tuned.
 | 
			
		||||
            tokens = tokens_a + [sep_token]
 | 
			
		||||
            if sep_token_extra:
 | 
			
		||||
                # RoBERTa uses an extra separator between pairs of sentences
 | 
			
		||||
                tokens += [sep_token]
 | 
			
		||||
 | 
			
		||||
            segment_ids = [sequence_a_segment_id] * len(tokens)
 | 
			
		||||
 | 
			
		||||
            if tokens_b:
 | 
			
		||||
                tokens += tokens_b + [sep_token]
 | 
			
		||||
                segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
 | 
			
		||||
 | 
			
		||||
            if cls_token_at_end:
 | 
			
		||||
                tokens = tokens + [cls_token]
 | 
			
		||||
                segment_ids = segment_ids + [cls_token_segment_id]
 | 
			
		||||
            else:
 | 
			
		||||
                tokens = [cls_token] + tokens
 | 
			
		||||
                segment_ids = [cls_token_segment_id] + segment_ids
 | 
			
		||||
 | 
			
		||||
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
 | 
			
		||||
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
 | 
			
		||||
            # tokens are attended to.
 | 
			
		||||
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
 | 
			
		||||
 | 
			
		||||
            # Zero-pad up to the sequence length.
 | 
			
		||||
            padding_length = max_seq_length - len(input_ids)
 | 
			
		||||
            if pad_on_left:
 | 
			
		||||
                input_ids = ([pad_token] * padding_length) + input_ids
 | 
			
		||||
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
 | 
			
		||||
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
 | 
			
		||||
            else:
 | 
			
		||||
                input_ids = input_ids + ([pad_token] * padding_length)
 | 
			
		||||
                input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
 | 
			
		||||
                segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
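            # Illustrative example: with max_seq_length == 8 and 5 real tokens, padding_length == 3 and
            # (for pad_on_left=False) input_ids gains three pad ids on the right while input_mask gains three 0s.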
 | 
			
		||||
 | 
			
		||||
            assert len(input_ids) == max_seq_length
 | 
			
		||||
            assert len(input_mask) == max_seq_length
 | 
			
		||||
            assert len(segment_ids) == max_seq_length
 | 
			
		||||
            choices_features.append((tokens, input_ids, input_mask, segment_ids))
 | 
			
		||||
        label = label_map[example.label]
 | 
			
		||||
 | 
			
		||||
        if ex_index < 2:
 | 
			
		||||
            logger.info("*** Example ***")
 | 
			
		||||
            logger.info("race_id: {}".format(example.example_id))
 | 
			
		||||
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
 | 
			
		||||
                logger.info("choice: {}".format(choice_idx))
 | 
			
		||||
                logger.info("tokens: {}".format(' '.join(tokens)))
 | 
			
		||||
                logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
 | 
			
		||||
                logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
 | 
			
		||||
                logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
 | 
			
		||||
                logger.info("label: {}".format(label))
 | 
			
		||||
 | 
			
		||||
        features.append(
 | 
			
		||||
            InputFeatures(
 | 
			
		||||
                example_id = example.example_id,
 | 
			
		||||
                choices_features = choices_features,
 | 
			
		||||
                label = label
 | 
			
		||||
            )
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
    return features
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
    """Truncates a sequence pair in place to the maximum length."""
 | 
			
		||||
 | 
			
		||||
    # This is a simple heuristic which will always truncate the longer sequence
 | 
			
		||||
    # one token at a time. This makes more sense than truncating an equal percent
 | 
			
		||||
    # of tokens from each, since if one sequence is very short then each token
 | 
			
		||||
    # that's truncated likely contains more information than a longer sequence.
 | 
			
		||||
 | 
			
		||||
    # However, since we'd rather not remove tokens from the options and question, you can choose to use a
    # bigger max sequence length or to pop only from the context.
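    # Illustrative example: with len(tokens_a) == 10, len(tokens_b) == 5 and max_length == 12, the loop below
    # pops three tokens from the end of tokens_a (10 > 5 on each pass), leaving 7 + 5 == 12.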
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_length:
 | 
			
		||||
            break
 | 
			
		||||
        if len(tokens_a) > len(tokens_b):
 | 
			
		||||
            tokens_a.pop()
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info('Attention! You are removing tokens from tokens_b (this is fine for the SWAG task). '
                        'If you are training on ARC or RACE (where you would be popping question + options), '
                        'you should try a bigger max sequence length!')
 | 
			
		||||
            tokens_b.pop()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
processors = {
 | 
			
		||||
    "race": RaceProcessor,
 | 
			
		||||
    "swag": SwagProcessor,
 | 
			
		||||
    "arc": ArcProcessor
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
GLUE_TASKS_NUM_LABELS = {
    "race": 4,
    "swag": 4,
    "arc": 4
}
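# A minimal usage sketch for the two registries above (the task name and data path are placeholders; the
# processor classes defined earlier in this file are assumed to expose get_labels()/get_train_examples(data_dir)):
#   processor = processors["arc"]()
#   label_list = processor.get_labels()
#   num_labels = GLUE_TASKS_NUM_LABELS["arc"]
#   train_examples = processor.get_train_examples("/path/to/ARC")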
 | 
			
		||||
							
								
								
									
examples/utils_squad.py (new file) @@ -0,0 +1,996 @@
 | 
			
		||||
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Load SQuAD dataset. """
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import json
 | 
			
		||||
import logging
 | 
			
		||||
import math
 | 
			
		||||
import collections
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
 | 
			
		||||
 | 
			
		||||
# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method)
 | 
			
		||||
from utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class SquadExample(object):
 | 
			
		||||
    """
 | 
			
		||||
    A single training/test example for the Squad dataset.
 | 
			
		||||
    For examples without an answer, the start and end position are -1.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self,
 | 
			
		||||
                 qas_id,
 | 
			
		||||
                 question_text,
 | 
			
		||||
                 doc_tokens,
 | 
			
		||||
                 orig_answer_text=None,
 | 
			
		||||
                 start_position=None,
 | 
			
		||||
                 end_position=None,
 | 
			
		||||
                 is_impossible=None):
 | 
			
		||||
        self.qas_id = qas_id
 | 
			
		||||
        self.question_text = question_text
 | 
			
		||||
        self.doc_tokens = doc_tokens
 | 
			
		||||
        self.orig_answer_text = orig_answer_text
 | 
			
		||||
        self.start_position = start_position
 | 
			
		||||
        self.end_position = end_position
 | 
			
		||||
        self.is_impossible = is_impossible
 | 
			
		||||
 | 
			
		||||
    def __str__(self):
 | 
			
		||||
        return self.__repr__()
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        s = ""
 | 
			
		||||
        s += "qas_id: %s" % (self.qas_id)
 | 
			
		||||
        s += ", question_text: %s" % (
 | 
			
		||||
            self.question_text)
 | 
			
		||||
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
 | 
			
		||||
        if self.start_position:
 | 
			
		||||
            s += ", start_position: %d" % (self.start_position)
 | 
			
		||||
        if self.end_position:
 | 
			
		||||
            s += ", end_position: %d" % (self.end_position)
 | 
			
		||||
        if self.is_impossible:
 | 
			
		||||
            s += ", is_impossible: %r" % (self.is_impossible)
 | 
			
		||||
        return s
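# A minimal construction sketch (all values are placeholders; start/end positions index into doc_tokens):
#   example = SquadExample(qas_id="q-0001",
#                          question_text="Who wrote the report?",
#                          doc_tokens=["The", "report", "was", "written", "by", "Jane", "Doe", "."],
#                          orig_answer_text="Jane Doe", start_position=5, end_position=6,
#                          is_impossible=False)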
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputFeatures(object):
 | 
			
		||||
    """A single set of features of data."""
 | 
			
		||||
 | 
			
		||||
    def __init__(self,
 | 
			
		||||
                 unique_id,
 | 
			
		||||
                 example_index,
 | 
			
		||||
                 doc_span_index,
 | 
			
		||||
                 tokens,
 | 
			
		||||
                 token_to_orig_map,
 | 
			
		||||
                 token_is_max_context,
 | 
			
		||||
                 input_ids,
 | 
			
		||||
                 input_mask,
 | 
			
		||||
                 segment_ids,
 | 
			
		||||
                 cls_index,
 | 
			
		||||
                 p_mask,
 | 
			
		||||
                 paragraph_len,
 | 
			
		||||
                 start_position=None,
 | 
			
		||||
                 end_position=None,
 | 
			
		||||
                 is_impossible=None):
 | 
			
		||||
        self.unique_id = unique_id
 | 
			
		||||
        self.example_index = example_index
 | 
			
		||||
        self.doc_span_index = doc_span_index
 | 
			
		||||
        self.tokens = tokens
 | 
			
		||||
        self.token_to_orig_map = token_to_orig_map
 | 
			
		||||
        self.token_is_max_context = token_is_max_context
 | 
			
		||||
        self.input_ids = input_ids
 | 
			
		||||
        self.input_mask = input_mask
 | 
			
		||||
        self.segment_ids = segment_ids
 | 
			
		||||
        self.cls_index = cls_index
 | 
			
		||||
        self.p_mask = p_mask
 | 
			
		||||
        self.paragraph_len = paragraph_len
 | 
			
		||||
        self.start_position = start_position
 | 
			
		||||
        self.end_position = end_position
 | 
			
		||||
        self.is_impossible = is_impossible
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def read_squad_examples(input_file, is_training, version_2_with_negative):
 | 
			
		||||
    """Read a SQuAD json file into a list of SquadExample."""
 | 
			
		||||
    with open(input_file, "r", encoding='utf-8') as reader:
 | 
			
		||||
        input_data = json.load(reader)["data"]
 | 
			
		||||
 | 
			
		||||
    def is_whitespace(c):
 | 
			
		||||
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
 | 
			
		||||
            return True
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    examples = []
 | 
			
		||||
    for entry in input_data:
 | 
			
		||||
        for paragraph in entry["paragraphs"]:
 | 
			
		||||
            paragraph_text = paragraph["context"]
 | 
			
		||||
            doc_tokens = []
 | 
			
		||||
            char_to_word_offset = []
 | 
			
		||||
            prev_is_whitespace = True
 | 
			
		||||
            for c in paragraph_text:
 | 
			
		||||
                if is_whitespace(c):
 | 
			
		||||
                    prev_is_whitespace = True
 | 
			
		||||
                else:
 | 
			
		||||
                    if prev_is_whitespace:
 | 
			
		||||
                        doc_tokens.append(c)
 | 
			
		||||
                    else:
 | 
			
		||||
                        doc_tokens[-1] += c
 | 
			
		||||
                    prev_is_whitespace = False
 | 
			
		||||
                char_to_word_offset.append(len(doc_tokens) - 1)
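            # Illustrative example: for paragraph_text == "New  York", doc_tokens becomes ["New", "York"] and
            # char_to_word_offset == [0, 0, 0, 0, 0, 1, 1, 1, 1], so a character offset maps directly to the
            # index of the whitespace token that contains it.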
 | 
			
		||||
 | 
			
		||||
            for qa in paragraph["qas"]:
 | 
			
		||||
                qas_id = qa["id"]
 | 
			
		||||
                question_text = qa["question"]
 | 
			
		||||
                start_position = None
 | 
			
		||||
                end_position = None
 | 
			
		||||
                orig_answer_text = None
 | 
			
		||||
                is_impossible = False
 | 
			
		||||
                if is_training:
 | 
			
		||||
                    if version_2_with_negative:
 | 
			
		||||
                        is_impossible = qa["is_impossible"]
 | 
			
		||||
                    if (len(qa["answers"]) != 1) and (not is_impossible):
 | 
			
		||||
                        raise ValueError(
 | 
			
		||||
                            "For training, each question should have exactly 1 answer.")
 | 
			
		||||
                    if not is_impossible:
 | 
			
		||||
                        answer = qa["answers"][0]
 | 
			
		||||
                        orig_answer_text = answer["text"]
 | 
			
		||||
                        answer_offset = answer["answer_start"]
 | 
			
		||||
                        answer_length = len(orig_answer_text)
 | 
			
		||||
                        start_position = char_to_word_offset[answer_offset]
 | 
			
		||||
                        end_position = char_to_word_offset[answer_offset + answer_length - 1]
 | 
			
		||||
                        # Only add answers where the text can be exactly recovered from the
 | 
			
		||||
                        # document. If this CAN'T happen it's likely due to weird Unicode
 | 
			
		||||
                        # stuff so we will just skip the example.
 | 
			
		||||
                        #
 | 
			
		||||
                        # Note that this means for training mode, every example is NOT
 | 
			
		||||
                        # guaranteed to be preserved.
 | 
			
		||||
                        actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
 | 
			
		||||
                        cleaned_answer_text = " ".join(
 | 
			
		||||
                            whitespace_tokenize(orig_answer_text))
 | 
			
		||||
                        if actual_text.find(cleaned_answer_text) == -1:
 | 
			
		||||
                            logger.warning("Could not find answer: '%s' vs. '%s'",
 | 
			
		||||
                                           actual_text, cleaned_answer_text)
 | 
			
		||||
                            continue
 | 
			
		||||
                    else:
 | 
			
		||||
                        start_position = -1
 | 
			
		||||
                        end_position = -1
 | 
			
		||||
                        orig_answer_text = ""
 | 
			
		||||
 | 
			
		||||
                example = SquadExample(
 | 
			
		||||
                    qas_id=qas_id,
 | 
			
		||||
                    question_text=question_text,
 | 
			
		||||
                    doc_tokens=doc_tokens,
 | 
			
		||||
                    orig_answer_text=orig_answer_text,
 | 
			
		||||
                    start_position=start_position,
 | 
			
		||||
                    end_position=end_position,
 | 
			
		||||
                    is_impossible=is_impossible)
 | 
			
		||||
                examples.append(example)
 | 
			
		||||
    return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_examples_to_features(examples, tokenizer, max_seq_length,
 | 
			
		||||
                                 doc_stride, max_query_length, is_training,
 | 
			
		||||
                                 cls_token_at_end=False,
 | 
			
		||||
                                 cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
 | 
			
		||||
                                 sequence_a_segment_id=0, sequence_b_segment_id=1,
 | 
			
		||||
                                 cls_token_segment_id=0, pad_token_segment_id=0,
 | 
			
		||||
                                 mask_padding_with_zero=True):
 | 
			
		||||
    """Loads a data file into a list of `InputBatch`s."""
 | 
			
		||||
 | 
			
		||||
    unique_id = 1000000000
 | 
			
		||||
    # cnt_pos, cnt_neg = 0, 0
 | 
			
		||||
    # max_N, max_M = 1024, 1024
 | 
			
		||||
    # f = np.zeros((max_N, max_M), dtype=np.float32)
 | 
			
		||||
 | 
			
		||||
    features = []
 | 
			
		||||
    for (example_index, example) in enumerate(examples):
 | 
			
		||||
 | 
			
		||||
        # if example_index % 100 == 0:
 | 
			
		||||
        #     logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)
 | 
			
		||||
 | 
			
		||||
        query_tokens = tokenizer.tokenize(example.question_text)
 | 
			
		||||
 | 
			
		||||
        if len(query_tokens) > max_query_length:
 | 
			
		||||
            query_tokens = query_tokens[0:max_query_length]
 | 
			
		||||
 | 
			
		||||
        tok_to_orig_index = []
 | 
			
		||||
        orig_to_tok_index = []
 | 
			
		||||
        all_doc_tokens = []
 | 
			
		||||
        for (i, token) in enumerate(example.doc_tokens):
 | 
			
		||||
            orig_to_tok_index.append(len(all_doc_tokens))
 | 
			
		||||
            sub_tokens = tokenizer.tokenize(token)
 | 
			
		||||
            for sub_token in sub_tokens:
 | 
			
		||||
                tok_to_orig_index.append(i)
 | 
			
		||||
                all_doc_tokens.append(sub_token)
 | 
			
		||||
 | 
			
		||||
        tok_start_position = None
 | 
			
		||||
        tok_end_position = None
 | 
			
		||||
        if is_training and example.is_impossible:
 | 
			
		||||
            tok_start_position = -1
 | 
			
		||||
            tok_end_position = -1
 | 
			
		||||
        if is_training and not example.is_impossible:
 | 
			
		||||
            tok_start_position = orig_to_tok_index[example.start_position]
 | 
			
		||||
            if example.end_position < len(example.doc_tokens) - 1:
 | 
			
		||||
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
 | 
			
		||||
            else:
 | 
			
		||||
                tok_end_position = len(all_doc_tokens) - 1
 | 
			
		||||
            (tok_start_position, tok_end_position) = _improve_answer_span(
 | 
			
		||||
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
 | 
			
		||||
                example.orig_answer_text)
 | 
			
		||||
 | 
			
		||||
        # The -3 accounts for [CLS], [SEP] and [SEP]
 | 
			
		||||
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
 | 
			
		||||
 | 
			
		||||
        # We can have documents that are longer than the maximum sequence length.
 | 
			
		||||
        # To deal with this we do a sliding window approach, where we take chunks
 | 
			
		||||
        # of up to our max length with a stride of `doc_stride`.
 | 
			
		||||
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
 | 
			
		||||
            "DocSpan", ["start", "length"])
 | 
			
		||||
        doc_spans = []
 | 
			
		||||
        start_offset = 0
 | 
			
		||||
        while start_offset < len(all_doc_tokens):
 | 
			
		||||
            length = len(all_doc_tokens) - start_offset
 | 
			
		||||
            if length > max_tokens_for_doc:
 | 
			
		||||
                length = max_tokens_for_doc
 | 
			
		||||
            doc_spans.append(_DocSpan(start=start_offset, length=length))
 | 
			
		||||
            if start_offset + length == len(all_doc_tokens):
 | 
			
		||||
                break
 | 
			
		||||
            start_offset += min(length, doc_stride)
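        # Illustrative example: with len(all_doc_tokens) == 500, max_tokens_for_doc == 300 and doc_stride == 128,
        # doc_spans becomes [(start=0, length=300), (start=128, length=300), (start=256, length=244)].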
 | 
			
		||||
 | 
			
		||||
        for (doc_span_index, doc_span) in enumerate(doc_spans):
 | 
			
		||||
            tokens = []
 | 
			
		||||
            token_to_orig_map = {}
 | 
			
		||||
            token_is_max_context = {}
 | 
			
		||||
            segment_ids = []
 | 
			
		||||
 | 
			
		||||
            # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens that can be in the answer)
            # The original TF implementation also keeps the classification token (set to 0) (not sure why...)
 | 
			
		||||
            p_mask = []
 | 
			
		||||
 | 
			
		||||
            # CLS token at the beginning
 | 
			
		||||
            if not cls_token_at_end:
 | 
			
		||||
                tokens.append(cls_token)
 | 
			
		||||
                segment_ids.append(cls_token_segment_id)
 | 
			
		||||
                p_mask.append(0)
 | 
			
		||||
                cls_index = 0
 | 
			
		||||
 | 
			
		||||
            # Query
 | 
			
		||||
            for token in query_tokens:
 | 
			
		||||
                tokens.append(token)
 | 
			
		||||
                segment_ids.append(sequence_a_segment_id)
 | 
			
		||||
                p_mask.append(1)
 | 
			
		||||
 | 
			
		||||
            # SEP token
 | 
			
		||||
            tokens.append(sep_token)
 | 
			
		||||
            segment_ids.append(sequence_a_segment_id)
 | 
			
		||||
            p_mask.append(1)
 | 
			
		||||
 | 
			
		||||
            # Paragraph
 | 
			
		||||
            for i in range(doc_span.length):
 | 
			
		||||
                split_token_index = doc_span.start + i
 | 
			
		||||
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
 | 
			
		||||
 | 
			
		||||
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
 | 
			
		||||
                                                       split_token_index)
 | 
			
		||||
                token_is_max_context[len(tokens)] = is_max_context
 | 
			
		||||
                tokens.append(all_doc_tokens[split_token_index])
 | 
			
		||||
                segment_ids.append(sequence_b_segment_id)
 | 
			
		||||
                p_mask.append(0)
 | 
			
		||||
            paragraph_len = doc_span.length
 | 
			
		||||
 | 
			
		||||
            # SEP token
 | 
			
		||||
            tokens.append(sep_token)
 | 
			
		||||
            segment_ids.append(sequence_b_segment_id)
 | 
			
		||||
            p_mask.append(1)
 | 
			
		||||
 | 
			
		||||
            # CLS token at the end
 | 
			
		||||
            if cls_token_at_end:
 | 
			
		||||
                tokens.append(cls_token)
 | 
			
		||||
                segment_ids.append(cls_token_segment_id)
 | 
			
		||||
                p_mask.append(0)
 | 
			
		||||
                cls_index = len(tokens) - 1  # Index of classification token
 | 
			
		||||
 | 
			
		||||
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
 | 
			
		||||
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
 | 
			
		||||
            # tokens are attended to.
 | 
			
		||||
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
 | 
			
		||||
 | 
			
		||||
            # Zero-pad up to the sequence length.
 | 
			
		||||
            while len(input_ids) < max_seq_length:
 | 
			
		||||
                input_ids.append(pad_token)
 | 
			
		||||
                input_mask.append(0 if mask_padding_with_zero else 1)
 | 
			
		||||
                segment_ids.append(pad_token_segment_id)
 | 
			
		||||
                p_mask.append(1)
 | 
			
		||||
 | 
			
		||||
            assert len(input_ids) == max_seq_length
 | 
			
		||||
            assert len(input_mask) == max_seq_length
 | 
			
		||||
            assert len(segment_ids) == max_seq_length
 | 
			
		||||
 | 
			
		||||
            span_is_impossible = example.is_impossible
 | 
			
		||||
            start_position = None
 | 
			
		||||
            end_position = None
 | 
			
		||||
            if is_training and not span_is_impossible:
 | 
			
		||||
                # For training, if our document chunk does not contain an annotation
 | 
			
		||||
                # we throw it out, since there is nothing to predict.
 | 
			
		||||
                doc_start = doc_span.start
 | 
			
		||||
                doc_end = doc_span.start + doc_span.length - 1
 | 
			
		||||
                out_of_span = False
 | 
			
		||||
                if not (tok_start_position >= doc_start and
 | 
			
		||||
                        tok_end_position <= doc_end):
 | 
			
		||||
                    out_of_span = True
 | 
			
		||||
                if out_of_span:
 | 
			
		||||
                    start_position = 0
 | 
			
		||||
                    end_position = 0
 | 
			
		||||
                    span_is_impossible = True
 | 
			
		||||
                else:
 | 
			
		||||
                    doc_offset = len(query_tokens) + 2
 | 
			
		||||
                    start_position = tok_start_position - doc_start + doc_offset
 | 
			
		||||
                    end_position = tok_end_position - doc_start + doc_offset
 | 
			
		||||
 | 
			
		||||
            if is_training and span_is_impossible:
 | 
			
		||||
                start_position = cls_index
 | 
			
		||||
                end_position = cls_index
 | 
			
		||||
 | 
			
		||||
            if example_index < 20:
 | 
			
		||||
                logger.info("*** Example ***")
 | 
			
		||||
                logger.info("unique_id: %s" % (unique_id))
 | 
			
		||||
                logger.info("example_index: %s" % (example_index))
 | 
			
		||||
                logger.info("doc_span_index: %s" % (doc_span_index))
 | 
			
		||||
                logger.info("tokens: %s" % " ".join(tokens))
 | 
			
		||||
                logger.info("token_to_orig_map: %s" % " ".join([
 | 
			
		||||
                    "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
 | 
			
		||||
                logger.info("token_is_max_context: %s" % " ".join([
 | 
			
		||||
                    "%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
 | 
			
		||||
                ]))
 | 
			
		||||
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
 | 
			
		||||
                logger.info(
 | 
			
		||||
                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
 | 
			
		||||
                logger.info(
 | 
			
		||||
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
 | 
			
		||||
                if is_training and span_is_impossible:
 | 
			
		||||
                    logger.info("impossible example")
 | 
			
		||||
                if is_training and not span_is_impossible:
 | 
			
		||||
                    answer_text = " ".join(tokens[start_position:(end_position + 1)])
 | 
			
		||||
                    logger.info("start_position: %d" % (start_position))
 | 
			
		||||
                    logger.info("end_position: %d" % (end_position))
 | 
			
		||||
                    logger.info(
 | 
			
		||||
                        "answer: %s" % (answer_text))
 | 
			
		||||
 | 
			
		||||
            features.append(
 | 
			
		||||
                InputFeatures(
 | 
			
		||||
                    unique_id=unique_id,
 | 
			
		||||
                    example_index=example_index,
 | 
			
		||||
                    doc_span_index=doc_span_index,
 | 
			
		||||
                    tokens=tokens,
 | 
			
		||||
                    token_to_orig_map=token_to_orig_map,
 | 
			
		||||
                    token_is_max_context=token_is_max_context,
 | 
			
		||||
                    input_ids=input_ids,
 | 
			
		||||
                    input_mask=input_mask,
 | 
			
		||||
                    segment_ids=segment_ids,
 | 
			
		||||
                    cls_index=cls_index,
 | 
			
		||||
                    p_mask=p_mask,
 | 
			
		||||
                    paragraph_len=paragraph_len,
 | 
			
		||||
                    start_position=start_position,
 | 
			
		||||
                    end_position=end_position,
 | 
			
		||||
                    is_impossible=span_is_impossible))
 | 
			
		||||
            unique_id += 1
 | 
			
		||||
 | 
			
		||||
    return features
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
 | 
			
		||||
                         orig_answer_text):
 | 
			
		||||
    """Returns tokenized answer spans that better match the annotated answer."""
 | 
			
		||||
 | 
			
		||||
    # The SQuAD annotations are character based. We first project them to
 | 
			
		||||
    # whitespace-tokenized words. But then after WordPiece tokenization, we can
 | 
			
		||||
    # often find a "better match". For example:
 | 
			
		||||
    #
 | 
			
		||||
    #   Question: What year was John Smith born?
 | 
			
		||||
    #   Context: The leader was John Smith (1895-1943).
 | 
			
		||||
    #   Answer: 1895
 | 
			
		||||
    #
 | 
			
		||||
    # The original whitespace-tokenized answer will be "(1895-1943).". However
 | 
			
		||||
    # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
 | 
			
		||||
    # the exact answer, 1895.
 | 
			
		||||
    #
 | 
			
		||||
    # However, this is not always possible. Consider the following:
 | 
			
		||||
    #
 | 
			
		||||
    #   Question: What country is the top exporter of electronics?
    #   Context: The Japanese electronics industry is the largest in the world.
 | 
			
		||||
    #   Answer: Japan
 | 
			
		||||
    #
 | 
			
		||||
    # In this case, the annotator chose "Japan" as a character sub-span of
 | 
			
		||||
    # the word "Japanese". Since our WordPiece tokenizer does not split
 | 
			
		||||
    # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
 | 
			
		||||
    # in SQuAD, but does happen.
 | 
			
		||||
    tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
 | 
			
		||||
 | 
			
		||||
    for new_start in range(input_start, input_end + 1):
 | 
			
		||||
        for new_end in range(input_end, new_start - 1, -1):
 | 
			
		||||
            text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
 | 
			
		||||
            if text_span == tok_answer_text:
 | 
			
		||||
                return (new_start, new_end)
 | 
			
		||||
 | 
			
		||||
    return (input_start, input_end)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _check_is_max_context(doc_spans, cur_span_index, position):
 | 
			
		||||
    """Check if this is the 'max context' doc span for the token."""
 | 
			
		||||
 | 
			
		||||
    # Because of the sliding window approach taken to scoring documents, a single
 | 
			
		||||
    # token can appear in multiple documents. E.g.
 | 
			
		||||
    #  Doc: the man went to the store and bought a gallon of milk
 | 
			
		||||
    #  Span A: the man went to the
 | 
			
		||||
    #  Span B: to the store and bought
 | 
			
		||||
    #  Span C: and bought a gallon of
 | 
			
		||||
    #  ...
 | 
			
		||||
    #
 | 
			
		||||
    # Now the word 'bought' will have two scores from spans B and C. We only
 | 
			
		||||
    # want to consider the score with "maximum context", which we define as
 | 
			
		||||
    # the *minimum* of its left and right context (the *sum* of left and
 | 
			
		||||
    # right context will always be the same, of course).
 | 
			
		||||
    #
 | 
			
		||||
    # In the example the maximum context for 'bought' would be span C since
 | 
			
		||||
    # it has 1 left context and 3 right context, while span B has 4 left context
 | 
			
		||||
    # and 0 right context.
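    # Worked example of the score computed below for 'bought' (both spans have length 5):
    #   Span B: min(4 left, 0 right) + 0.01 * 5 = 0.05
    #   Span C: min(1 left, 3 right) + 0.01 * 5 = 1.05  -> span C is the max-context span.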
 | 
			
		||||
    best_score = None
 | 
			
		||||
    best_span_index = None
 | 
			
		||||
    for (span_index, doc_span) in enumerate(doc_spans):
 | 
			
		||||
        end = doc_span.start + doc_span.length - 1
 | 
			
		||||
        if position < doc_span.start:
 | 
			
		||||
            continue
 | 
			
		||||
        if position > end:
 | 
			
		||||
            continue
 | 
			
		||||
        num_left_context = position - doc_span.start
 | 
			
		||||
        num_right_context = end - position
 | 
			
		||||
        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
 | 
			
		||||
        if best_score is None or score > best_score:
 | 
			
		||||
            best_score = score
 | 
			
		||||
            best_span_index = span_index
 | 
			
		||||
 | 
			
		||||
    return cur_span_index == best_span_index
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
RawResult = collections.namedtuple("RawResult",
 | 
			
		||||
                                   ["unique_id", "start_logits", "end_logits"])
 | 
			
		||||
 | 
			
		||||
def write_predictions(all_examples, all_features, all_results, n_best_size,
 | 
			
		||||
                      max_answer_length, do_lower_case, output_prediction_file,
 | 
			
		||||
                      output_nbest_file, output_null_log_odds_file, verbose_logging,
 | 
			
		||||
                      version_2_with_negative, null_score_diff_threshold):
 | 
			
		||||
    """Write final predictions to the json file and log-odds of null if needed."""
 | 
			
		||||
    logger.info("Writing predictions to: %s" % (output_prediction_file))
 | 
			
		||||
    logger.info("Writing nbest to: %s" % (output_nbest_file))
 | 
			
		||||
 | 
			
		||||
    example_index_to_features = collections.defaultdict(list)
 | 
			
		||||
    for feature in all_features:
 | 
			
		||||
        example_index_to_features[feature.example_index].append(feature)
 | 
			
		||||
 | 
			
		||||
    unique_id_to_result = {}
 | 
			
		||||
    for result in all_results:
 | 
			
		||||
        unique_id_to_result[result.unique_id] = result
 | 
			
		||||
 | 
			
		||||
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
 | 
			
		||||
        "PrelimPrediction",
 | 
			
		||||
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
 | 
			
		||||
 | 
			
		||||
    all_predictions = collections.OrderedDict()
 | 
			
		||||
    all_nbest_json = collections.OrderedDict()
 | 
			
		||||
    scores_diff_json = collections.OrderedDict()
 | 
			
		||||
 | 
			
		||||
    for (example_index, example) in enumerate(all_examples):
 | 
			
		||||
        features = example_index_to_features[example_index]
 | 
			
		||||
 | 
			
		||||
        prelim_predictions = []
 | 
			
		||||
        # keep track of the minimum score of null start+end of position 0
 | 
			
		||||
        score_null = 1000000  # large and positive
 | 
			
		||||
        min_null_feature_index = 0  # the paragraph slice with min null score
 | 
			
		||||
        null_start_logit = 0  # the start logit at the slice with min null score
 | 
			
		||||
        null_end_logit = 0  # the end logit at the slice with min null score
 | 
			
		||||
        for (feature_index, feature) in enumerate(features):
 | 
			
		||||
            result = unique_id_to_result[feature.unique_id]
 | 
			
		||||
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
 | 
			
		||||
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
 | 
			
		||||
            # if we could have irrelevant answers, get the min score of irrelevant
 | 
			
		||||
            if version_2_with_negative:
 | 
			
		||||
                feature_null_score = result.start_logits[0] + result.end_logits[0]
 | 
			
		||||
                if feature_null_score < score_null:
 | 
			
		||||
                    score_null = feature_null_score
 | 
			
		||||
                    min_null_feature_index = feature_index
 | 
			
		||||
                    null_start_logit = result.start_logits[0]
 | 
			
		||||
                    null_end_logit = result.end_logits[0]
 | 
			
		||||
            for start_index in start_indexes:
 | 
			
		||||
                for end_index in end_indexes:
 | 
			
		||||
                    # We could hypothetically create invalid predictions, e.g., predict
 | 
			
		||||
                    # that the start of the span is in the question. We throw out all
 | 
			
		||||
                    # invalid predictions.
 | 
			
		||||
                    if start_index >= len(feature.tokens):
 | 
			
		||||
                        continue
 | 
			
		||||
                    if end_index >= len(feature.tokens):
 | 
			
		||||
                        continue
 | 
			
		||||
                    if start_index not in feature.token_to_orig_map:
 | 
			
		||||
                        continue
 | 
			
		||||
                    if end_index not in feature.token_to_orig_map:
 | 
			
		||||
                        continue
 | 
			
		||||
                    if not feature.token_is_max_context.get(start_index, False):
 | 
			
		||||
                        continue
 | 
			
		||||
                    if end_index < start_index:
 | 
			
		||||
                        continue
 | 
			
		||||
                    length = end_index - start_index + 1
 | 
			
		||||
                    if length > max_answer_length:
 | 
			
		||||
                        continue
 | 
			
		||||
                    prelim_predictions.append(
 | 
			
		||||
                        _PrelimPrediction(
 | 
			
		||||
                            feature_index=feature_index,
 | 
			
		||||
                            start_index=start_index,
 | 
			
		||||
                            end_index=end_index,
 | 
			
		||||
                            start_logit=result.start_logits[start_index],
 | 
			
		||||
                            end_logit=result.end_logits[end_index]))
 | 
			
		||||
        if version_2_with_negative:
 | 
			
		||||
            prelim_predictions.append(
 | 
			
		||||
                _PrelimPrediction(
 | 
			
		||||
                    feature_index=min_null_feature_index,
 | 
			
		||||
                    start_index=0,
 | 
			
		||||
                    end_index=0,
 | 
			
		||||
                    start_logit=null_start_logit,
 | 
			
		||||
                    end_logit=null_end_logit))
 | 
			
		||||
        prelim_predictions = sorted(
 | 
			
		||||
            prelim_predictions,
 | 
			
		||||
            key=lambda x: (x.start_logit + x.end_logit),
 | 
			
		||||
            reverse=True)
 | 
			
		||||
 | 
			
		||||
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
 | 
			
		||||
            "NbestPrediction", ["text", "start_logit", "end_logit"])
 | 
			
		||||
 | 
			
		||||
        seen_predictions = {}
 | 
			
		||||
        nbest = []
 | 
			
		||||
        for pred in prelim_predictions:
 | 
			
		||||
            if len(nbest) >= n_best_size:
 | 
			
		||||
                break
 | 
			
		||||
            feature = features[pred.feature_index]
 | 
			
		||||
            if pred.start_index > 0:  # this is a non-null prediction
 | 
			
		||||
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
 | 
			
		||||
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
 | 
			
		||||
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
 | 
			
		||||
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
 | 
			
		||||
                tok_text = " ".join(tok_tokens)
 | 
			
		||||
 | 
			
		||||
                # De-tokenize WordPieces that have been split off.
 | 
			
		||||
                tok_text = tok_text.replace(" ##", "")
 | 
			
		||||
                tok_text = tok_text.replace("##", "")
 | 
			
		||||
 | 
			
		||||
                # Clean whitespace
 | 
			
		||||
                tok_text = tok_text.strip()
 | 
			
		||||
                tok_text = " ".join(tok_text.split())
 | 
			
		||||
                orig_text = " ".join(orig_tokens)
 | 
			
		||||
 | 
			
		||||
                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
 | 
			
		||||
                if final_text in seen_predictions:
 | 
			
		||||
                    continue
 | 
			
		||||
 | 
			
		||||
                seen_predictions[final_text] = True
 | 
			
		||||
            else:
 | 
			
		||||
                final_text = ""
 | 
			
		||||
                seen_predictions[final_text] = True
 | 
			
		||||
 | 
			
		||||
            nbest.append(
 | 
			
		||||
                _NbestPrediction(
 | 
			
		||||
                    text=final_text,
 | 
			
		||||
                    start_logit=pred.start_logit,
 | 
			
		||||
                    end_logit=pred.end_logit))
 | 
			
		||||
        # if we didn't include the empty option in the n-best, include it
 | 
			
		||||
        if version_2_with_negative:
 | 
			
		||||
            if "" not in seen_predictions:
 | 
			
		||||
                nbest.append(
 | 
			
		||||
                    _NbestPrediction(
 | 
			
		||||
                        text="",
 | 
			
		||||
                        start_logit=null_start_logit,
 | 
			
		||||
                        end_logit=null_end_logit))
 | 
			
		||||
                
 | 
			
		||||
            # In very rare edge cases we could have only the single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
 | 
			
		||||
            if len(nbest)==1:
 | 
			
		||||
                nbest.insert(0,
 | 
			
		||||
                    _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
 | 
			
		||||
 | 
			
		||||
        # In very rare edge cases we could have no valid predictions. So we
 | 
			
		||||
        # just create a nonce prediction in this case to avoid failure.
 | 
			
		||||
        if not nbest:
 | 
			
		||||
            nbest.append(
 | 
			
		||||
                _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
 | 
			
		||||
 | 
			
		||||
        assert len(nbest) >= 1
 | 
			
		||||
 | 
			
		||||
        total_scores = []
 | 
			
		||||
        best_non_null_entry = None
 | 
			
		||||
        for entry in nbest:
 | 
			
		||||
            total_scores.append(entry.start_logit + entry.end_logit)
 | 
			
		||||
            if not best_non_null_entry:
 | 
			
		||||
                if entry.text:
 | 
			
		||||
                    best_non_null_entry = entry
 | 
			
		||||
 | 
			
		||||
        probs = _compute_softmax(total_scores)
 | 
			
		||||
 | 
			
		||||
        nbest_json = []
 | 
			
		||||
        for (i, entry) in enumerate(nbest):
 | 
			
		||||
            output = collections.OrderedDict()
 | 
			
		||||
            output["text"] = entry.text
 | 
			
		||||
            output["probability"] = probs[i]
 | 
			
		||||
            output["start_logit"] = entry.start_logit
 | 
			
		||||
            output["end_logit"] = entry.end_logit
 | 
			
		||||
            nbest_json.append(output)
 | 
			
		||||
 | 
			
		||||
        assert len(nbest_json) >= 1
 | 
			
		||||
 | 
			
		||||
        if not version_2_with_negative:
 | 
			
		||||
            all_predictions[example.qas_id] = nbest_json[0]["text"]
 | 
			
		||||
        else:
 | 
			
		||||
            # predict "" iff the null score - the score of best non-null > threshold
 | 
			
		||||
            score_diff = score_null - best_non_null_entry.start_logit - (
 | 
			
		||||
                best_non_null_entry.end_logit)
 | 
			
		||||
            scores_diff_json[example.qas_id] = score_diff
 | 
			
		||||
            if score_diff > null_score_diff_threshold:
 | 
			
		||||
                all_predictions[example.qas_id] = ""
 | 
			
		||||
            else:
 | 
			
		||||
                all_predictions[example.qas_id] = best_non_null_entry.text
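            # Illustrative example: with score_null == 2.0 and best_non_null_entry logits summing to 1.2,
            # score_diff == 0.8; with null_score_diff_threshold == 0.0 the predicted answer is the empty string.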
 | 
			
		||||
        all_nbest_json[example.qas_id] = nbest_json
 | 
			
		||||
 | 
			
		||||
    with open(output_prediction_file, "w") as writer:
 | 
			
		||||
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
 | 
			
		||||
 | 
			
		||||
    with open(output_nbest_file, "w") as writer:
 | 
			
		||||
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
 | 
			
		||||
 | 
			
		||||
    if version_2_with_negative:
 | 
			
		||||
        with open(output_null_log_odds_file, "w") as writer:
 | 
			
		||||
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
 | 
			
		||||
 | 
			
		||||
    return all_predictions
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# For XLNet (and XLM which uses the same head)
 | 
			
		||||
RawResultExtended = collections.namedtuple("RawResultExtended",
 | 
			
		||||
    ["unique_id", "start_top_log_probs", "start_top_index",
 | 
			
		||||
     "end_top_log_probs", "end_top_index", "cls_logits"])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def write_predictions_extended(all_examples, all_features, all_results, n_best_size,
 | 
			
		||||
                                max_answer_length, output_prediction_file,
 | 
			
		||||
                                output_nbest_file,
 | 
			
		||||
                                output_null_log_odds_file, orig_data_file,
 | 
			
		||||
                                start_n_top, end_n_top, version_2_with_negative,
 | 
			
		||||
                                tokenizer, verbose_logging):
 | 
			
		||||
    """ XLNet write prediction logic (more complex than Bert's).
 | 
			
		||||
        Write final predictions to the json file and log-odds of null if needed.
 | 
			
		||||
 | 
			
		||||
        Requires utils_squad_evaluate.py
 | 
			
		||||
    """
 | 
			
		||||
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
 | 
			
		||||
        "PrelimPrediction",
 | 
			
		||||
        ["feature_index", "start_index", "end_index",
 | 
			
		||||
        "start_log_prob", "end_log_prob"])
 | 
			
		||||
 | 
			
		||||
    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
 | 
			
		||||
        "NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
 | 
			
		||||
 | 
			
		||||
    logger.info("Writing predictions to: %s", output_prediction_file)
 | 
			
		||||
    # logger.info("Writing nbest to: %s" % (output_nbest_file))
 | 
			
		||||
 | 
			
		||||
    example_index_to_features = collections.defaultdict(list)
 | 
			
		||||
    for feature in all_features:
 | 
			
		||||
        example_index_to_features[feature.example_index].append(feature)
 | 
			
		||||
 | 
			
		||||
    unique_id_to_result = {}
 | 
			
		||||
    for result in all_results:
 | 
			
		||||
        unique_id_to_result[result.unique_id] = result
 | 
			
		||||
 | 
			
		||||
    all_predictions = collections.OrderedDict()
 | 
			
		||||
    all_nbest_json = collections.OrderedDict()
 | 
			
		||||
    scores_diff_json = collections.OrderedDict()
 | 
			
		||||
 | 
			
		||||
    for (example_index, example) in enumerate(all_examples):
 | 
			
		||||
        features = example_index_to_features[example_index]
 | 
			
		||||
 | 
			
		||||
        prelim_predictions = []
 | 
			
		||||
        # keep track of the minimum score of null start+end of position 0
 | 
			
		||||
        score_null = 1000000  # large and positive
 | 
			
		||||
 | 
			
		||||
        for (feature_index, feature) in enumerate(features):
 | 
			
		||||
            result = unique_id_to_result[feature.unique_id]
 | 
			
		||||
 | 
			
		||||
            cur_null_score = result.cls_logits
 | 
			
		||||
 | 
			
		||||
            # if we could have irrelevant answers, get the min score of irrelevant
 | 
			
		||||
            score_null = min(score_null, cur_null_score)
 | 
			
		||||
 | 
			
		||||
            for i in range(start_n_top):
 | 
			
		||||
                for j in range(end_n_top):
 | 
			
		||||
                    start_log_prob = result.start_top_log_probs[i]
 | 
			
		||||
                    start_index = result.start_top_index[i]
 | 
			
		||||
 | 
			
		||||
                    j_index = i * end_n_top + j
 | 
			
		||||
 | 
			
		||||
                    end_log_prob = result.end_top_log_probs[j_index]
 | 
			
		||||
                    end_index = result.end_top_index[j_index]
 | 
			
		||||
 | 
			
		||||
                    # We could hypothetically create invalid predictions, e.g., predict
 | 
			
		||||
                    # that the start of the span is in the question. We throw out all
 | 
			
		||||
                    # invalid predictions.
 | 
			
		||||
                    if start_index >= feature.paragraph_len - 1:
 | 
			
		||||
                        continue
 | 
			
		||||
                    if end_index >= feature.paragraph_len - 1:
 | 
			
		||||
                        continue
 | 
			
		||||
 | 
			
		||||
                    if not feature.token_is_max_context.get(start_index, False):
 | 
			
		||||
                        continue
 | 
			
		||||
                    if end_index < start_index:
 | 
			
		||||
                        continue
 | 
			
		||||
                    length = end_index - start_index + 1
 | 
			
		||||
                    if length > max_answer_length:
 | 
			
		||||
                        continue
 | 
			
		||||
 | 
			
		||||
                    prelim_predictions.append(
 | 
			
		||||
                        _PrelimPrediction(
 | 
			
		||||
                            feature_index=feature_index,
 | 
			
		||||
                            start_index=start_index,
 | 
			
		||||
                            end_index=end_index,
 | 
			
		||||
                            start_log_prob=start_log_prob,
 | 
			
		||||
                            end_log_prob=end_log_prob))
 | 
			
		||||
 | 
			
		||||
        prelim_predictions = sorted(
 | 
			
		||||
            prelim_predictions,
 | 
			
		||||
            key=lambda x: (x.start_log_prob + x.end_log_prob),
 | 
			
		||||
            reverse=True)
 | 
			
		||||
 | 
			
		||||
        seen_predictions = {}
 | 
			
		||||
        nbest = []
 | 
			
		||||
        for pred in prelim_predictions:
 | 
			
		||||
            if len(nbest) >= n_best_size:
 | 
			
		||||
                break
 | 
			
		||||
            feature = features[pred.feature_index]
 | 
			
		||||
 | 
			
		||||
            # XLNet un-tokenizer
 | 
			
		||||
            # Let's keep it simple for now and see if we need all this later.
 | 
			
		||||
            # 
 | 
			
		||||
            # tok_start_to_orig_index = feature.tok_start_to_orig_index
 | 
			
		||||
            # tok_end_to_orig_index = feature.tok_end_to_orig_index
 | 
			
		||||
            # start_orig_pos = tok_start_to_orig_index[pred.start_index]
 | 
			
		||||
            # end_orig_pos = tok_end_to_orig_index[pred.end_index]
 | 
			
		||||
            # paragraph_text = example.paragraph_text
 | 
			
		||||
            # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
 | 
			
		||||
 | 
			
		||||
            # Previously used Bert untokenizer
 | 
			
		||||
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
 | 
			
		||||
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
 | 
			
		||||
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
 | 
			
		||||
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
 | 
			
		||||
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
 | 
			
		||||
 | 
			
		||||
            # Clean whitespace
 | 
			
		||||
            tok_text = tok_text.strip()
 | 
			
		||||
            tok_text = " ".join(tok_text.split())
 | 
			
		||||
            orig_text = " ".join(orig_tokens)
 | 
			
		||||
 | 
			
		||||
            final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case,
 | 
			
		||||
                                        verbose_logging)
 | 
			
		||||
 | 
			
		||||
            if final_text in seen_predictions:
 | 
			
		||||
                continue
 | 
			
		||||
 | 
			
		||||
            seen_predictions[final_text] = True
 | 
			
		||||
 | 
			
		||||
            nbest.append(
 | 
			
		||||
                _NbestPrediction(
 | 
			
		||||
                    text=final_text,
 | 
			
		||||
                    start_log_prob=pred.start_log_prob,
 | 
			
		||||
                    end_log_prob=pred.end_log_prob))
 | 
			
		||||
 | 
			
		||||
        # In very rare edge cases we could have no valid predictions. So we
 | 
			
		||||
        # just create a nonce prediction in this case to avoid failure.
 | 
			
		||||
        if not nbest:
 | 
			
		||||
            nbest.append(
 | 
			
		||||
                _NbestPrediction(text="", start_log_prob=-1e6,
 | 
			
		||||
                end_log_prob=-1e6))
 | 
			
		||||
 | 
			
		||||
        total_scores = []
 | 
			
		||||
        best_non_null_entry = None
 | 
			
		||||
        for entry in nbest:
 | 
			
		||||
            total_scores.append(entry.start_log_prob + entry.end_log_prob)
 | 
			
		||||
            if not best_non_null_entry:
 | 
			
		||||
                best_non_null_entry = entry
 | 
			
		||||
 | 
			
		||||
        probs = _compute_softmax(total_scores)
 | 
			
		||||
 | 
			
		||||
        nbest_json = []
 | 
			
		||||
        for (i, entry) in enumerate(nbest):
 | 
			
		||||
            output = collections.OrderedDict()
 | 
			
		||||
            output["text"] = entry.text
 | 
			
		||||
            output["probability"] = probs[i]
 | 
			
		||||
            output["start_log_prob"] = entry.start_log_prob
 | 
			
		||||
            output["end_log_prob"] = entry.end_log_prob
 | 
			
		||||
            nbest_json.append(output)
 | 
			
		||||
 | 
			
		||||
        assert len(nbest_json) >= 1
 | 
			
		||||
        assert best_non_null_entry is not None
 | 
			
		||||
 | 
			
		||||
        score_diff = score_null
 | 
			
		||||
        scores_diff_json[example.qas_id] = score_diff
 | 
			
		||||
        # note(zhiliny): always predict best_non_null_entry
 | 
			
		||||
        # and the evaluation script will search for the best threshold
 | 
			
		||||
        all_predictions[example.qas_id] = best_non_null_entry.text
 | 
			
		||||
 | 
			
		||||
        all_nbest_json[example.qas_id] = nbest_json
 | 
			
		||||
 | 
			
		||||
    with open(output_prediction_file, "w") as writer:
 | 
			
		||||
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
 | 
			
		||||
 | 
			
		||||
    with open(output_nbest_file, "w") as writer:
 | 
			
		||||
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
 | 
			
		||||
 | 
			
		||||
    if version_2_with_negative:
 | 
			
		||||
        with open(output_null_log_odds_file, "w") as writer:
 | 
			
		||||
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
 | 
			
		||||
 | 
			
		||||
    with open(orig_data_file, "r", encoding='utf-8') as reader:
 | 
			
		||||
        orig_data = json.load(reader)["data"]
 | 
			
		||||
 | 
			
		||||
    qid_to_has_ans = make_qid_to_has_ans(orig_data)
 | 
			
		||||
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
 | 
			
		||||
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
 | 
			
		||||
    exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions)
 | 
			
		||||
    out_eval = {}
 | 
			
		||||
 | 
			
		||||
    find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
 | 
			
		||||
 | 
			
		||||
    return out_eval
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
 | 
			
		||||
    """Project the tokenized prediction back to the original text."""
 | 
			
		||||
 | 
			
		||||
    # When we created the data, we kept track of the alignment between original
 | 
			
		||||
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
 | 
			
		||||
    # now `orig_text` contains the span of our original text corresponding to the
 | 
			
		||||
    # span that we predicted.
 | 
			
		||||
    #
 | 
			
		||||
    # However, `orig_text` may contain extra characters that we don't want in
 | 
			
		||||
    # our prediction.
 | 
			
		||||
    #
 | 
			
		||||
    # For example, let's say:
 | 
			
		||||
    #   pred_text = steve smith
 | 
			
		||||
    #   orig_text = Steve Smith's
 | 
			
		||||
    #
 | 
			
		||||
    # We don't want to return `orig_text` because it contains the extra "'s".
 | 
			
		||||
    #
 | 
			
		||||
    # We don't want to return `pred_text` because it's already been normalized
 | 
			
		||||
    # (the SQuAD eval script also does punctuation stripping/lower casing but
 | 
			
		||||
    # our tokenizer does additional normalization like stripping accent
 | 
			
		||||
    # characters).
 | 
			
		||||
    #
 | 
			
		||||
    # What we really want to return is "Steve Smith".
 | 
			
		||||
    #
 | 
			
		||||
    # Therefore, we have to apply a semi-complicated alignment heuristic between
 | 
			
		||||
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
 | 
			
		||||
    # can fail in certain cases in which case we just return `orig_text`.
 | 
			
		||||
 | 
			
		||||
    def _strip_spaces(text):
 | 
			
		||||
        ns_chars = []
 | 
			
		||||
        ns_to_s_map = collections.OrderedDict()
 | 
			
		||||
        for (i, c) in enumerate(text):
 | 
			
		||||
            if c == " ":
 | 
			
		||||
                continue
 | 
			
		||||
            ns_to_s_map[len(ns_chars)] = i
 | 
			
		||||
            ns_chars.append(c)
 | 
			
		||||
        ns_text = "".join(ns_chars)
 | 
			
		||||
        return (ns_text, ns_to_s_map)
 | 
			
		||||
 | 
			
		||||
    # We first tokenize `orig_text`, strip whitespace from the result
 | 
			
		||||
    # and `pred_text`, and check if they are the same length. If they are
 | 
			
		||||
    # NOT the same length, the heuristic has failed. If they are the same
 | 
			
		||||
    # length, we assume the characters are one-to-one aligned.
 | 
			
		||||
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
 | 
			
		||||
 | 
			
		||||
    tok_text = " ".join(tokenizer.tokenize(orig_text))
 | 
			
		||||
 | 
			
		||||
    start_position = tok_text.find(pred_text)
 | 
			
		||||
    if start_position == -1:
 | 
			
		||||
        if verbose_logging:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
 | 
			
		||||
        return orig_text
 | 
			
		||||
    end_position = start_position + len(pred_text) - 1
 | 
			
		||||
 | 
			
		||||
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
 | 
			
		||||
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
 | 
			
		||||
 | 
			
		||||
    if len(orig_ns_text) != len(tok_ns_text):
 | 
			
		||||
        if verbose_logging:
 | 
			
		||||
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
 | 
			
		||||
                        orig_ns_text, tok_ns_text)
 | 
			
		||||
        return orig_text
 | 
			
		||||
 | 
			
		||||
    # We then project the characters in `pred_text` back to `orig_text` using
 | 
			
		||||
    # the character-to-character alignment.
 | 
			
		||||
    tok_s_to_ns_map = {}
 | 
			
		||||
    for (i, tok_index) in tok_ns_to_s_map.items():
 | 
			
		||||
        tok_s_to_ns_map[tok_index] = i
 | 
			
		||||
 | 
			
		||||
    orig_start_position = None
 | 
			
		||||
    if start_position in tok_s_to_ns_map:
 | 
			
		||||
        ns_start_position = tok_s_to_ns_map[start_position]
 | 
			
		||||
        if ns_start_position in orig_ns_to_s_map:
 | 
			
		||||
            orig_start_position = orig_ns_to_s_map[ns_start_position]
 | 
			
		||||
 | 
			
		||||
    if orig_start_position is None:
 | 
			
		||||
        if verbose_logging:
 | 
			
		||||
            logger.info("Couldn't map start position")
 | 
			
		||||
        return orig_text
 | 
			
		||||
 | 
			
		||||
    orig_end_position = None
 | 
			
		||||
    if end_position in tok_s_to_ns_map:
 | 
			
		||||
        ns_end_position = tok_s_to_ns_map[end_position]
 | 
			
		||||
        if ns_end_position in orig_ns_to_s_map:
 | 
			
		||||
            orig_end_position = orig_ns_to_s_map[ns_end_position]
 | 
			
		||||
 | 
			
		||||
    if orig_end_position is None:
 | 
			
		||||
        if verbose_logging:
 | 
			
		||||
            logger.info("Couldn't map end position")
 | 
			
		||||
        return orig_text
 | 
			
		||||
 | 
			
		||||
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
 | 
			
		||||
    return output_text
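
# Editor's sketch (not part of the original file): with the "steve smith" example
# from the comment above, the projection recovers the original casing while
# dropping the trailing "'s":
#   get_final_text("steve smith", "Steve Smith's", do_lower_case=True)  ->  "Steve Smith"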
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _get_best_indexes(logits, n_best_size):
 | 
			
		||||
    """Get the n-best logits from a list."""
 | 
			
		||||
    index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
 | 
			
		||||
 | 
			
		||||
    best_indexes = []
 | 
			
		||||
    for i in range(len(index_and_score)):
 | 
			
		||||
        if i >= n_best_size:
 | 
			
		||||
            break
 | 
			
		||||
        best_indexes.append(index_and_score[i][0])
 | 
			
		||||
    return best_indexes
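
# Editor's sketch: returns the positions of the n_best_size largest logits,
# highest first. For example:
#   _get_best_indexes([0.1, 2.3, -0.5, 1.7], n_best_size=2)  ->  [1, 3]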
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _compute_softmax(scores):
 | 
			
		||||
    """Compute softmax probability over raw logits."""
 | 
			
		||||
    if not scores:
 | 
			
		||||
        return []
 | 
			
		||||
 | 
			
		||||
    max_score = None
 | 
			
		||||
    for score in scores:
 | 
			
		||||
        if max_score is None or score > max_score:
 | 
			
		||||
            max_score = score
 | 
			
		||||
 | 
			
		||||
    exp_scores = []
 | 
			
		||||
    total_sum = 0.0
 | 
			
		||||
    for score in scores:
 | 
			
		||||
        x = math.exp(score - max_score)
 | 
			
		||||
        exp_scores.append(x)
 | 
			
		||||
        total_sum += x
 | 
			
		||||
 | 
			
		||||
    probs = []
 | 
			
		||||
    for score in exp_scores:
 | 
			
		||||
        probs.append(score / total_sum)
 | 
			
		||||
    return probs
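
# Editor's sketch: subtracting max_score before exp() keeps the computation
# numerically stable without changing the result. Approximate example:
#   _compute_softmax([1.0, 2.0, 3.0])  ->  [0.0900, 0.2447, 0.6652]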
examples/utils_squad_evaluate.py  (330 lines, Normal file)
@@ -0,0 +1,330 @@
 | 
			
		||||
""" Official evaluation script for SQuAD version 2.0.
 | 
			
		||||
    Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
 | 
			
		||||
 | 
			
		||||
In addition to basic functionality, we also compute additional statistics and
 | 
			
		||||
plot precision-recall curves if an additional na_prob.json file is provided.
 | 
			
		||||
This file is expected to map question IDs to the model's predicted probability
 | 
			
		||||
that a question is unanswerable.
 | 
			
		||||
"""
 | 
			
		||||
import argparse
 | 
			
		||||
import collections
 | 
			
		||||
import json
 | 
			
		||||
import numpy as np
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import string
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
class EVAL_OPTS():
 | 
			
		||||
  def __init__(self, data_file, pred_file, out_file="",
 | 
			
		||||
               na_prob_file="na_prob.json", na_prob_thresh=1.0,
 | 
			
		||||
               out_image_dir=None, verbose=False):
 | 
			
		||||
    self.data_file = data_file
 | 
			
		||||
    self.pred_file = pred_file
 | 
			
		||||
    self.out_file = out_file
 | 
			
		||||
    self.na_prob_file = na_prob_file
 | 
			
		||||
    self.na_prob_thresh = na_prob_thresh
 | 
			
		||||
    self.out_image_dir = out_image_dir
 | 
			
		||||
    self.verbose = verbose
 | 
			
		||||
 | 
			
		||||
OPTS = None
 | 
			
		||||
 | 
			
		||||
def parse_args():
 | 
			
		||||
  parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
 | 
			
		||||
  parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
 | 
			
		||||
  parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
 | 
			
		||||
  parser.add_argument('--out-file', '-o', metavar='eval.json',
 | 
			
		||||
                      help='Write accuracy metrics to file (default is stdout).')
 | 
			
		||||
  parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
 | 
			
		||||
                      help='Model estimates of probability of no answer.')
 | 
			
		||||
  parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
 | 
			
		||||
                      help='Predict "" if no-answer probability exceeds this (default = 1.0).')
 | 
			
		||||
  parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
 | 
			
		||||
                      help='Save precision-recall curves to directory.')
 | 
			
		||||
  parser.add_argument('--verbose', '-v', action='store_true')
 | 
			
		||||
  if len(sys.argv) == 1:
 | 
			
		||||
    parser.print_help()
 | 
			
		||||
    sys.exit(1)
 | 
			
		||||
  return parser.parse_args()
 | 
			
		||||
 | 
			
		||||
def make_qid_to_has_ans(dataset):
 | 
			
		||||
  qid_to_has_ans = {}
 | 
			
		||||
  for article in dataset:
 | 
			
		||||
    for p in article['paragraphs']:
 | 
			
		||||
      for qa in p['qas']:
 | 
			
		||||
        qid_to_has_ans[qa['id']] = bool(qa['answers'])
 | 
			
		||||
  return qid_to_has_ans
 | 
			
		||||
 | 
			
		||||
def normalize_answer(s):
 | 
			
		||||
  """Lower text and remove punctuation, articles and extra whitespace."""
 | 
			
		||||
  def remove_articles(text):
 | 
			
		||||
    regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
 | 
			
		||||
    return re.sub(regex, ' ', text)
 | 
			
		||||
  def white_space_fix(text):
 | 
			
		||||
    return ' '.join(text.split())
 | 
			
		||||
  def remove_punc(text):
 | 
			
		||||
    exclude = set(string.punctuation)
 | 
			
		||||
    return ''.join(ch for ch in text if ch not in exclude)
 | 
			
		||||
  def lower(text):
 | 
			
		||||
    return text.lower()
 | 
			
		||||
  return white_space_fix(remove_articles(remove_punc(lower(s))))
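
# Editor's sketch: lower-cases, strips punctuation and the articles a/an/the,
# and collapses whitespace. For example:
#   normalize_answer("The Eiffel Tower!")  ->  "eiffel tower"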
 | 
			
		||||
 | 
			
		||||
def get_tokens(s):
 | 
			
		||||
  if not s: return []
 | 
			
		||||
  return normalize_answer(s).split()
 | 
			
		||||
 | 
			
		||||
def compute_exact(a_gold, a_pred):
 | 
			
		||||
  return int(normalize_answer(a_gold) == normalize_answer(a_pred))
 | 
			
		||||
 | 
			
		||||
def compute_f1(a_gold, a_pred):
 | 
			
		||||
  gold_toks = get_tokens(a_gold)
 | 
			
		||||
  pred_toks = get_tokens(a_pred)
 | 
			
		||||
  common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
 | 
			
		||||
  num_same = sum(common.values())
 | 
			
		||||
  if len(gold_toks) == 0 or len(pred_toks) == 0:
 | 
			
		||||
    # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
 | 
			
		||||
    return int(gold_toks == pred_toks)
 | 
			
		||||
  if num_same == 0:
 | 
			
		||||
    return 0
 | 
			
		||||
  precision = 1.0 * num_same / len(pred_toks)
 | 
			
		||||
  recall = 1.0 * num_same / len(gold_toks)
 | 
			
		||||
  f1 = (2 * precision * recall) / (precision + recall)
 | 
			
		||||
  return f1
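
# Editor's sketch: token-level F1 between normalized answers. For example,
# gold "Steve Smith" vs. prediction "Smith" gives precision 1 and recall 1/2:
#   compute_f1("Steve Smith", "Smith")  ->  0.666...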
 | 
			
		||||
 | 
			
		||||
def get_raw_scores(dataset, preds):
 | 
			
		||||
  exact_scores = {}
 | 
			
		||||
  f1_scores = {}
 | 
			
		||||
  for article in dataset:
 | 
			
		||||
    for p in article['paragraphs']:
 | 
			
		||||
      for qa in p['qas']:
 | 
			
		||||
        qid = qa['id']
 | 
			
		||||
        gold_answers = [a['text'] for a in qa['answers']
 | 
			
		||||
                        if normalize_answer(a['text'])]
 | 
			
		||||
        if not gold_answers:
 | 
			
		||||
          # For unanswerable questions, only correct answer is empty string
 | 
			
		||||
          gold_answers = ['']
 | 
			
		||||
        if qid not in preds:
 | 
			
		||||
          print('Missing prediction for %s' % qid)
 | 
			
		||||
          continue
 | 
			
		||||
        a_pred = preds[qid]
 | 
			
		||||
        # Take max over all gold answers
 | 
			
		||||
        exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
 | 
			
		||||
        f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
 | 
			
		||||
  return exact_scores, f1_scores
 | 
			
		||||
 | 
			
		||||
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
 | 
			
		||||
  new_scores = {}
 | 
			
		||||
  for qid, s in scores.items():
 | 
			
		||||
    pred_na = na_probs[qid] > na_prob_thresh
 | 
			
		||||
    if pred_na:
 | 
			
		||||
      new_scores[qid] = float(not qid_to_has_ans[qid])
 | 
			
		||||
    else:
 | 
			
		||||
      new_scores[qid] = s
 | 
			
		||||
  return new_scores
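
# Editor's sketch: when the predicted no-answer probability exceeds the threshold,
# the span score is replaced by whether "no answer" is actually correct. For example:
#   apply_no_ans_threshold({"q1": 1.0}, {"q1": 0.9}, {"q1": True}, 0.5)  ->  {"q1": 0.0}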
 | 
			
		||||
 | 
			
		||||
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
 | 
			
		||||
  if not qid_list:
 | 
			
		||||
    total = len(exact_scores)
 | 
			
		||||
    return collections.OrderedDict([
 | 
			
		||||
        ('exact', 100.0 * sum(exact_scores.values()) / total),
 | 
			
		||||
        ('f1', 100.0 * sum(f1_scores.values()) / total),
 | 
			
		||||
        ('total', total),
 | 
			
		||||
    ])
 | 
			
		||||
  else:
 | 
			
		||||
    total = len(qid_list)
 | 
			
		||||
    return collections.OrderedDict([
 | 
			
		||||
        ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
 | 
			
		||||
        ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
 | 
			
		||||
        ('total', total),
 | 
			
		||||
    ])
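
# Editor's sketch: aggregates per-question scores into percentages, e.g.
#   make_eval_dict({"q1": 1, "q2": 0}, {"q1": 1.0, "q2": 0.5})
#   ->  OrderedDict([('exact', 50.0), ('f1', 75.0), ('total', 2)])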
 | 
			
		||||
 | 
			
		||||
def merge_eval(main_eval, new_eval, prefix):
 | 
			
		||||
  for k in new_eval:
 | 
			
		||||
    main_eval['%s_%s' % (prefix, k)] = new_eval[k]
 | 
			
		||||
 | 
			
		||||
def plot_pr_curve(precisions, recalls, out_image, title):
 | 
			
		||||
  plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
 | 
			
		||||
  plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
 | 
			
		||||
  plt.xlabel('Recall')
 | 
			
		||||
  plt.ylabel('Precision')
 | 
			
		||||
  plt.xlim([0.0, 1.05])
 | 
			
		||||
  plt.ylim([0.0, 1.05])
 | 
			
		||||
  plt.title(title)
 | 
			
		||||
  plt.savefig(out_image)
 | 
			
		||||
  plt.clf()
 | 
			
		||||
 | 
			
		||||
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
 | 
			
		||||
                               out_image=None, title=None):
 | 
			
		||||
  qid_list = sorted(na_probs, key=lambda k: na_probs[k])
 | 
			
		||||
  true_pos = 0.0
 | 
			
		||||
  cur_p = 1.0
 | 
			
		||||
  cur_r = 0.0
 | 
			
		||||
  precisions = [1.0]
 | 
			
		||||
  recalls = [0.0]
 | 
			
		||||
  avg_prec = 0.0
 | 
			
		||||
  for i, qid in enumerate(qid_list):
 | 
			
		||||
    if qid_to_has_ans[qid]:
 | 
			
		||||
      true_pos += scores[qid]
 | 
			
		||||
    cur_p = true_pos / float(i+1)
 | 
			
		||||
    cur_r = true_pos / float(num_true_pos)
 | 
			
		||||
    if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
 | 
			
		||||
      # i.e., if we can put a threshold after this point
 | 
			
		||||
      avg_prec += cur_p * (cur_r - recalls[-1])
 | 
			
		||||
      precisions.append(cur_p)
 | 
			
		||||
      recalls.append(cur_r)
 | 
			
		||||
  if out_image:
 | 
			
		||||
    plot_pr_curve(precisions, recalls, out_image, title)
 | 
			
		||||
  return {'ap': 100.0 * avg_prec}
 | 
			
		||||
 | 
			
		||||
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, 
 | 
			
		||||
                                  qid_to_has_ans, out_image_dir):
 | 
			
		||||
  if out_image_dir and not os.path.exists(out_image_dir):
 | 
			
		||||
    os.makedirs(out_image_dir)
 | 
			
		||||
  num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
 | 
			
		||||
  if num_true_pos == 0:
 | 
			
		||||
    return
 | 
			
		||||
  pr_exact = make_precision_recall_eval(
 | 
			
		||||
      exact_raw, na_probs, num_true_pos, qid_to_has_ans,
 | 
			
		||||
      out_image=os.path.join(out_image_dir, 'pr_exact.png'),
 | 
			
		||||
      title='Precision-Recall curve for Exact Match score')
 | 
			
		||||
  pr_f1 = make_precision_recall_eval(
 | 
			
		||||
      f1_raw, na_probs, num_true_pos, qid_to_has_ans,
 | 
			
		||||
      out_image=os.path.join(out_image_dir, 'pr_f1.png'),
 | 
			
		||||
      title='Precision-Recall curve for F1 score')
 | 
			
		||||
  oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
 | 
			
		||||
  pr_oracle = make_precision_recall_eval(
 | 
			
		||||
      oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
 | 
			
		||||
      out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
 | 
			
		||||
      title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
 | 
			
		||||
  merge_eval(main_eval, pr_exact, 'pr_exact')
 | 
			
		||||
  merge_eval(main_eval, pr_f1, 'pr_f1')
 | 
			
		||||
  merge_eval(main_eval, pr_oracle, 'pr_oracle')
 | 
			
		||||
 | 
			
		||||
def histogram_na_prob(na_probs, qid_list, image_dir, name):
 | 
			
		||||
  if not qid_list:
 | 
			
		||||
    return
 | 
			
		||||
  x = [na_probs[k] for k in qid_list]
 | 
			
		||||
  weights = np.ones_like(x) / float(len(x))
 | 
			
		||||
  plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
 | 
			
		||||
  plt.xlabel('Model probability of no-answer')
 | 
			
		||||
  plt.ylabel('Proportion of dataset')
 | 
			
		||||
  plt.title('Histogram of no-answer probability: %s' % name)
 | 
			
		||||
  plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
 | 
			
		||||
  plt.clf()
 | 
			
		||||
 | 
			
		||||
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
 | 
			
		||||
  num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
 | 
			
		||||
  cur_score = num_no_ans
 | 
			
		||||
  best_score = cur_score
 | 
			
		||||
  best_thresh = 0.0
 | 
			
		||||
  qid_list = sorted(na_probs, key=lambda k: na_probs[k])
 | 
			
		||||
  for i, qid in enumerate(qid_list):
 | 
			
		||||
    if qid not in scores: continue
 | 
			
		||||
    if qid_to_has_ans[qid]:
 | 
			
		||||
      diff = scores[qid]
 | 
			
		||||
    else:
 | 
			
		||||
      if preds[qid]:
 | 
			
		||||
        diff = -1
 | 
			
		||||
      else:
 | 
			
		||||
        diff = 0
 | 
			
		||||
    cur_score += diff
 | 
			
		||||
    if cur_score > best_score:
 | 
			
		||||
      best_score = cur_score
 | 
			
		||||
      best_thresh = na_probs[qid]
 | 
			
		||||
  return 100.0 * best_score / len(scores), best_thresh
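
# Editor's note (sketch): questions are swept in order of increasing no-answer
# probability; the running score assumes everything seen so far is answered and the
# rest is predicted unanswerable, so the best running score yields the
# accuracy-maximizing na_prob threshold.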
 | 
			
		||||
 | 
			
		||||
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
 | 
			
		||||
  num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
 | 
			
		||||
  cur_score = num_no_ans
 | 
			
		||||
  best_score = cur_score
 | 
			
		||||
  best_thresh = 0.0
 | 
			
		||||
  qid_list = sorted(na_probs, key=lambda k: na_probs[k])
 | 
			
		||||
  for i, qid in enumerate(qid_list):
 | 
			
		||||
    if qid not in scores: continue
 | 
			
		||||
    if qid_to_has_ans[qid]:
 | 
			
		||||
      diff = scores[qid]
 | 
			
		||||
    else:
 | 
			
		||||
      if preds[qid]:
 | 
			
		||||
        diff = -1
 | 
			
		||||
      else:
 | 
			
		||||
        diff = 0
 | 
			
		||||
    cur_score += diff
 | 
			
		||||
    if cur_score > best_score:
 | 
			
		||||
      best_score = cur_score
 | 
			
		||||
      best_thresh = na_probs[qid]
 | 
			
		||||
 | 
			
		||||
  has_ans_score, has_ans_cnt = 0, 0
 | 
			
		||||
  for qid in qid_list:
 | 
			
		||||
    if not qid_to_has_ans[qid]: continue
 | 
			
		||||
    has_ans_cnt += 1
 | 
			
		||||
 | 
			
		||||
    if qid not in scores: continue
 | 
			
		||||
    has_ans_score += scores[qid]
 | 
			
		||||
 | 
			
		||||
  return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
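
# Editor's note (sketch): same sweep as find_best_thresh, but this variant also
# returns the average score restricted to questions that do have an answer
# (has_ans_score / has_ans_cnt), surfaced as has_ans_exact / has_ans_f1 below.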
 | 
			
		||||
 | 
			
		||||
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
 | 
			
		||||
  best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
 | 
			
		||||
  best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
 | 
			
		||||
  main_eval['best_exact'] = best_exact
 | 
			
		||||
  main_eval['best_exact_thresh'] = exact_thresh
 | 
			
		||||
  main_eval['best_f1'] = best_f1
 | 
			
		||||
  main_eval['best_f1_thresh'] = f1_thresh
 | 
			
		||||
 | 
			
		||||
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
 | 
			
		||||
  best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
 | 
			
		||||
  best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
 | 
			
		||||
  main_eval['best_exact'] = best_exact
 | 
			
		||||
  main_eval['best_exact_thresh'] = exact_thresh
 | 
			
		||||
  main_eval['best_f1'] = best_f1
 | 
			
		||||
  main_eval['best_f1_thresh'] = f1_thresh
 | 
			
		||||
  main_eval['has_ans_exact'] = has_ans_exact
 | 
			
		||||
  main_eval['has_ans_f1'] = has_ans_f1
 | 
			
		||||
 | 
			
		||||
def main(OPTS):
 | 
			
		||||
  with open(OPTS.data_file) as f:
 | 
			
		||||
    dataset_json = json.load(f)
 | 
			
		||||
    dataset = dataset_json['data']
 | 
			
		||||
  with open(OPTS.pred_file) as f:
 | 
			
		||||
    preds = json.load(f)
 | 
			
		||||
  if OPTS.na_prob_file:
 | 
			
		||||
    with open(OPTS.na_prob_file) as f:
 | 
			
		||||
      na_probs = json.load(f)
 | 
			
		||||
  else:
 | 
			
		||||
    na_probs = {k: 0.0 for k in preds}
 | 
			
		||||
  qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
 | 
			
		||||
  has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
 | 
			
		||||
  no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
 | 
			
		||||
  exact_raw, f1_raw = get_raw_scores(dataset, preds)
 | 
			
		||||
  exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
 | 
			
		||||
                                        OPTS.na_prob_thresh)
 | 
			
		||||
  f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
 | 
			
		||||
                                     OPTS.na_prob_thresh)
 | 
			
		||||
  out_eval = make_eval_dict(exact_thresh, f1_thresh)
 | 
			
		||||
  if has_ans_qids:
 | 
			
		||||
    has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
 | 
			
		||||
    merge_eval(out_eval, has_ans_eval, 'HasAns')
 | 
			
		||||
  if no_ans_qids:
 | 
			
		||||
    no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
 | 
			
		||||
    merge_eval(out_eval, no_ans_eval, 'NoAns')
 | 
			
		||||
  if OPTS.na_prob_file:
 | 
			
		||||
    find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
 | 
			
		||||
  if OPTS.na_prob_file and OPTS.out_image_dir:
 | 
			
		||||
    run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, 
 | 
			
		||||
                                  qid_to_has_ans, OPTS.out_image_dir)
 | 
			
		||||
    histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
 | 
			
		||||
    histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
 | 
			
		||||
  if OPTS.out_file:
 | 
			
		||||
    with open(OPTS.out_file, 'w') as f:
 | 
			
		||||
      json.dump(out_eval, f)
 | 
			
		||||
  else:
 | 
			
		||||
    print(json.dumps(out_eval, indent=2))
 | 
			
		||||
  return out_eval
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
  OPTS = parse_args()
 | 
			
		||||
  if OPTS.out_image_dir:
 | 
			
		||||
    import matplotlib
 | 
			
		||||
    matplotlib.use('Agg')
 | 
			
		||||
    import matplotlib.pyplot as plt 
 | 
			
		||||
  main(OPTS)
 | 
			
hubconf.py  (275 lines)
@@ -1,187 +1,112 @@
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
from pytorch_pretrained_bert.modeling import (
 | 
			
		||||
        BertModel,
 | 
			
		||||
        BertForNextSentencePrediction,
 | 
			
		||||
        BertForMaskedLM,
 | 
			
		||||
        BertForMultipleChoice,
 | 
			
		||||
        BertForPreTraining,
 | 
			
		||||
        BertForQuestionAnswering,
 | 
			
		||||
        BertForSequenceClassification,
 | 
			
		||||
        BertForTokenClassification,
 | 
			
		||||
        )
 | 
			
		||||
from transformers import (
 | 
			
		||||
    AutoTokenizer, AutoConfig, AutoModel, AutoModelWithLMHead, AutoModelForSequenceClassification, AutoModelForQuestionAnswering
 | 
			
		||||
)
 | 
			
		||||
from transformers.file_utils import add_start_docstrings
 | 
			
		||||
 | 
			
		||||
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex']
 | 
			
		||||
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex', 'sentencepiece', 'sacremoses']
 | 
			
		||||
 | 
			
		||||
# A lot of models share the same param doc. Use a decorator
 | 
			
		||||
# to save typing
 | 
			
		||||
bert_docstring = """
 | 
			
		||||
    Params:
 | 
			
		||||
        pretrained_model_name_or_path: either:
 | 
			
		||||
            - a str with the name of a pre-trained model to load
 | 
			
		||||
                . `bert-base-uncased`
 | 
			
		||||
                . `bert-large-uncased`
 | 
			
		||||
                . `bert-base-cased`
 | 
			
		||||
                . `bert-large-cased`
 | 
			
		||||
                . `bert-base-multilingual-uncased`
 | 
			
		||||
                . `bert-base-multilingual-cased`
 | 
			
		||||
                . `bert-base-chinese`
 | 
			
		||||
            - a path or url to a pretrained model archive containing:
 | 
			
		||||
                . `bert_config.json` a configuration file for the model
 | 
			
		||||
                . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining
 | 
			
		||||
                  instance
 | 
			
		||||
            - a path or url to a pretrained model archive containing:
 | 
			
		||||
                . `bert_config.json` a configuration file for the model
 | 
			
		||||
                . `model.chkpt` a TensorFlow checkpoint
 | 
			
		||||
        from_tf: should we load the weights from a locally saved TensorFlow
 | 
			
		||||
                 checkpoint
 | 
			
		||||
        cache_dir: an optional path to a folder in which the pre-trained models
 | 
			
		||||
                   will be cached.
 | 
			
		||||
        state_dict: an optional state dictionary
 | 
			
		||||
                    (collections.OrderedDict object) to use instead of Google
 | 
			
		||||
                    pre-trained models
 | 
			
		||||
        *inputs, **kwargs: additional input for the specific Bert class
 | 
			
		||||
            (ex: num_labels for BertForSequenceClassification)
 | 
			
		||||
"""
 | 
			
		||||
@add_start_docstrings(AutoConfig.__doc__)
 | 
			
		||||
def config(*args, **kwargs):
 | 
			
		||||
    r""" 
 | 
			
		||||
                # Using torch.hub !
 | 
			
		||||
                import torch
 | 
			
		||||
 | 
			
		||||
                config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased')  # Download configuration from S3 and cache.
 | 
			
		||||
                config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/')  # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
 | 
			
		||||
                config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json')
 | 
			
		||||
                config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False)
 | 
			
		||||
                assert config.output_attention == True
 | 
			
		||||
                config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True)
 | 
			
		||||
                assert config.output_attention == True
 | 
			
		||||
                assert unused_kwargs == {'foo': False}
 | 
			
		||||
 | 
			
		||||
            """
 | 
			
		||||
 | 
			
		||||
    return AutoConfig.from_pretrained(*args, **kwargs)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _append_from_pretrained_docstring(docstr):
 | 
			
		||||
    def docstring_decorator(fn):
 | 
			
		||||
        fn.__doc__ = fn.__doc__ + docstr
 | 
			
		||||
        return fn
 | 
			
		||||
    return docstring_decorator
 | 
			
		||||
@add_start_docstrings(AutoTokenizer.__doc__)
 | 
			
		||||
def tokenizer(*args, **kwargs):
 | 
			
		||||
    r""" 
 | 
			
		||||
        # Using torch.hub !
 | 
			
		||||
        import torch
 | 
			
		||||
 | 
			
		||||
        tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased')    # Download vocabulary from S3 and cache.
 | 
			
		||||
        tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/')  # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
 | 
			
		||||
 | 
			
		||||
def bertTokenizer(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    Instantiate a BertTokenizer from a pre-trained/customized vocab file
 | 
			
		||||
    Args:
 | 
			
		||||
    pretrained_model_name_or_path: Path to pretrained model archive
 | 
			
		||||
                                   or one of pre-trained vocab configs below.
 | 
			
		||||
                                       * bert-base-uncased
 | 
			
		||||
                                       * bert-large-uncased
 | 
			
		||||
                                       * bert-base-cased
 | 
			
		||||
                                       * bert-large-cased
 | 
			
		||||
                                       * bert-base-multilingual-uncased
 | 
			
		||||
                                       * bert-base-multilingual-cased
 | 
			
		||||
                                       * bert-base-chinese
 | 
			
		||||
    Keyword args:
 | 
			
		||||
    cache_dir: an optional path to a specific directory to download and cache
 | 
			
		||||
               the pre-trained model weights.
 | 
			
		||||
               Default: None
 | 
			
		||||
    do_lower_case: Whether to lower case the input.
 | 
			
		||||
                   Only has an effect when do_wordpiece_only=False
 | 
			
		||||
                   Default: True
 | 
			
		||||
    do_basic_tokenize: Whether to do basic tokenization before wordpiece.
 | 
			
		||||
                       Default: True
 | 
			
		||||
    max_len: An artificial maximum length to truncate tokenized sequences to;
 | 
			
		||||
             Effective maximum length is always the minimum of this
 | 
			
		||||
             value (if specified) and the underlying BERT model's
 | 
			
		||||
             sequence length.
 | 
			
		||||
             Default: None
 | 
			
		||||
    never_split: List of tokens which will never be split during tokenization.
 | 
			
		||||
                 Only has an effect when do_wordpiece_only=False
 | 
			
		||||
                 Default: ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]
 | 
			
		||||
 | 
			
		||||
    Example:
 | 
			
		||||
        >>> sentence = 'Hello, World!'
 | 
			
		||||
        >>> tokenizer = torch.hub.load('ailzhang/pytorch-pretrained-BERT:hubconf', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
 | 
			
		||||
        >>> toks = tokenizer.tokenize(sentence)
 | 
			
		||||
        ['Hello', '##,', 'World', '##!']
 | 
			
		||||
        >>> ids = tokenizer.convert_tokens_to_ids(toks)
 | 
			
		||||
        [8667, 28136, 1291, 28125]
 | 
			
		||||
    return AutoTokenizer.from_pretrained(*args, **kwargs)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@add_start_docstrings(AutoModel.__doc__)
 | 
			
		||||
def model(*args, **kwargs):
 | 
			
		||||
    r"""
 | 
			
		||||
            # Using torch.hub !
 | 
			
		||||
            import torch
 | 
			
		||||
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
 | 
			
		||||
            assert model.config.output_attention == True
 | 
			
		||||
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
 | 
			
		||||
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
 | 
			
		||||
    return AutoModel.from_pretrained(*args, **kwargs)
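
# Editor's sketch (assumed typical usage, not taken from this diff): combining the
# 'tokenizer' and 'model' entry points for a single forward pass.
#
#   import torch
#   tok = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased')
#   bert = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased')
#   input_ids = torch.tensor([tok.encode("Hello, World!")])
#   last_hidden_state = bert(input_ids)[0]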
 | 
			
		||||
 | 
			
		||||
@add_start_docstrings(AutoModelWithLMHead.__doc__)
 | 
			
		||||
def modelWithLMHead(*args, **kwargs):
 | 
			
		||||
    r"""
 | 
			
		||||
        # Using torch.hub !
 | 
			
		||||
        import torch
 | 
			
		||||
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
 | 
			
		||||
        assert model.config.output_attention == True
 | 
			
		||||
        # Loading from a TF checkpoint file instead of a PyTorch model (slower)
 | 
			
		||||
        config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return tokenizer
 | 
			
		||||
    return AutoModelWithLMHead.from_pretrained(*args, **kwargs)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertModel(*args, **kwargs):
 | 
			
		||||
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
 | 
			
		||||
def modelForSequenceClassification(*args, **kwargs):
 | 
			
		||||
    r"""
 | 
			
		||||
            # Using torch.hub !
 | 
			
		||||
            import torch
 | 
			
		||||
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
 | 
			
		||||
            assert model.config.output_attention == True
 | 
			
		||||
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
 | 
			
		||||
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
 | 
			
		||||
            model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
 | 
			
		||||
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
 | 
			
		||||
def modelForQuestionAnswering(*args, **kwargs):
 | 
			
		||||
    r"""
 | 
			
		||||
        # Using torch.hub !
 | 
			
		||||
        import torch
 | 
			
		||||
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
 | 
			
		||||
        assert model.config.output_attention == True
 | 
			
		||||
        # Loading from a TF checkpoint file instead of a PyTorch model (slower)
 | 
			
		||||
        config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
 | 
			
		||||
        model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    BertModel is the basic BERT Transformer model with a layer of summed token,
 | 
			
		||||
    position and sequence embeddings followed by a series of identical
 | 
			
		||||
    self-attention blocks (12 for BERT-base, 24 for BERT-large).
 | 
			
		||||
    """
 | 
			
		||||
    model = BertModel.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForNextSentencePrediction(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BERT model with next sentence prediction head.
 | 
			
		||||
    This module comprises the BERT model followed by the next sentence
 | 
			
		||||
    classification head.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForNextSentencePrediction.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForPreTraining(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BERT model with pre-training heads.
 | 
			
		||||
    This module comprises the BERT model followed by the two pre-training heads
 | 
			
		||||
        - the masked language modeling head, and
 | 
			
		||||
        - the next sentence classification head.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForPreTraining.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForMaskedLM(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForMaskedLM includes the BertModel Transformer followed by the
 | 
			
		||||
    (possibly) pre-trained masked language modeling head.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForMaskedLM.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForSequenceClassification(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForSequenceClassification is a fine-tuning model that includes
 | 
			
		||||
    BertModel and a sequence-level (sequence or pair of sequences) classifier
 | 
			
		||||
    on top of the BertModel.
 | 
			
		||||
 | 
			
		||||
    The sequence-level classifier is a linear layer that takes as input the
 | 
			
		||||
    last hidden state of the first token (`[CLS]`) in the input sequence
 | 
			
		||||
    (see Figures 3a and 3b in the BERT paper).
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForSequenceClassification.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForMultipleChoice(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForMultipleChoice is a fine-tuning model that includes BertModel and a
 | 
			
		||||
    linear layer on top of the BertModel.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForMultipleChoice.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForQuestionAnswering(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForQuestionAnswering is a fine-tuning model that includes BertModel
 | 
			
		||||
    with a token-level classifier on top of the full sequence of last hidden
 | 
			
		||||
    states.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForQuestionAnswering.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForTokenClassification(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForTokenClassification is a fine-tuning model that includes BertModel
 | 
			
		||||
    and a token-level classifier on top of the BertModel.
 | 
			
		||||
 | 
			
		||||
    The token-level classifier is a linear layer that takes as input the last
 | 
			
		||||
    hidden state of the sequence.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForTokenClassification.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
 | 
			
		||||
 | 
			
notebooks/Comparing-PT-and-TF-models.ipynb  (1630 lines, Normal file)
@@ -78,7 +78,7 @@
 | 
			
		||||
    "import importlib.util\n",
 | 
			
		||||
    "import sys\n",
 | 
			
		||||
    "import tensorflow as tf\n",
 | 
			
		||||
    "import pytorch_pretrained_bert as ppb\n",
 | 
			
		||||
    "import pytorch_transformers as ppb\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "def del_all_flags(FLAGS):\n",
 | 
			
		||||
    "    flags_dict = FLAGS._flags()    \n",
 | 
			
		||||
@@ -3997,9 +3997,9 @@
 | 
			
		||||
     "name": "stderr",
 | 
			
		||||
     "output_type": "stream",
 | 
			
		||||
     "text": [
 | 
			
		||||
      "11/16/2018 11:03:05 - INFO - pytorch_pretrained_bert.modeling -   loading archive file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz from cache at /Users/thomaswolf/.pytorch_pretrained_bert/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba\n",
 | 
			
		||||
      "11/16/2018 11:03:05 - INFO - pytorch_pretrained_bert.modeling -   extracting archive file /Users/thomaswolf/.pytorch_pretrained_bert/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba to temp dir /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmpaqgsm566\n",
 | 
			
		||||
      "11/16/2018 11:03:08 - INFO - pytorch_pretrained_bert.modeling -   Model config {\n",
 | 
			
		||||
      "11/16/2018 11:03:05 - INFO - pytorch_transformers.modeling_bert -   loading archive file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz from cache at /Users/thomaswolf/.pytorch_transformers/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba\n",
 | 
			
		||||
      "11/16/2018 11:03:05 - INFO - pytorch_transformers.modeling_bert -   extracting archive file /Users/thomaswolf/.pytorch_transformers/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba to temp dir /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmpaqgsm566\n",
 | 
			
		||||
      "11/16/2018 11:03:08 - INFO - pytorch_transformers.modeling_bert -   Model config {\n",
 | 
			
		||||
      "  \"attention_probs_dropout_prob\": 0.1,\n",
 | 
			
		||||
      "  \"hidden_act\": \"gelu\",\n",
 | 
			
		||||
      "  \"hidden_dropout_prob\": 0.1,\n",
 | 
			
		||||
 | 
			
		||||
@@ -86,7 +86,7 @@
 | 
			
		||||
    "spec.loader.exec_module(module)\n",
 | 
			
		||||
    "sys.modules['modeling_tensorflow'] = module\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/run_squad.py')\n",
 | 
			
		||||
    "spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/run_bert_squad.py')\n",
 | 
			
		||||
    "module = importlib.util.module_from_spec(spec)\n",
 | 
			
		||||
    "spec.loader.exec_module(module)\n",
 | 
			
		||||
    "sys.modules['run_squad_tensorflow'] = module\n",
 | 
			
		||||
 | 
			
		||||
@@ -342,7 +342,7 @@
 | 
			
		||||
   "outputs": [],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "import extract_features\n",
 | 
			
		||||
    "import pytorch_pretrained_bert as ppb\n",
 | 
			
		||||
    "import pytorch_transformers as ppb\n",
 | 
			
		||||
    "from extract_features import *"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
@@ -375,8 +375,8 @@
 | 
			
		||||
     "name": "stderr",
 | 
			
		||||
     "output_type": "stream",
 | 
			
		||||
     "text": [
 | 
			
		||||
      "11/15/2018 16:21:18 - INFO - pytorch_pretrained_bert.modeling -   loading archive file ../../google_models/uncased_L-12_H-768_A-12/\n",
 | 
			
		||||
      "11/15/2018 16:21:18 - INFO - pytorch_pretrained_bert.modeling -   Model config {\n",
 | 
			
		||||
      "11/15/2018 16:21:18 - INFO - pytorch_transformers.modeling_bert -   loading archive file ../../google_models/uncased_L-12_H-768_A-12/\n",
 | 
			
		||||
      "11/15/2018 16:21:18 - INFO - pytorch_transformers.modeling_bert -   Model config {\n",
 | 
			
		||||
      "  \"attention_probs_dropout_prob\": 0.1,\n",
 | 
			
		||||
      "  \"hidden_act\": \"gelu\",\n",
 | 
			
		||||
      "  \"hidden_dropout_prob\": 0.1,\n",
 | 
			
		||||
 | 
			
		||||
@@ -1,24 +0,0 @@
 | 
			
		||||
__version__ = "0.6.2"
 | 
			
		||||
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
 | 
			
		||||
from .tokenization_openai import OpenAIGPTTokenizer
 | 
			
		||||
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
 | 
			
		||||
from .tokenization_gpt2 import GPT2Tokenizer
 | 
			
		||||
 | 
			
		||||
from .modeling import (BertConfig, BertModel, BertForPreTraining,
 | 
			
		||||
                       BertForMaskedLM, BertForNextSentencePrediction,
 | 
			
		||||
                       BertForSequenceClassification, BertForMultipleChoice,
 | 
			
		||||
                       BertForTokenClassification, BertForQuestionAnswering,
 | 
			
		||||
                       load_tf_weights_in_bert)
 | 
			
		||||
from .modeling_openai import (OpenAIGPTConfig, OpenAIGPTModel,
 | 
			
		||||
                              OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
 | 
			
		||||
                              load_tf_weights_in_openai_gpt)
 | 
			
		||||
from .modeling_transfo_xl import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel,
 | 
			
		||||
                                  load_tf_weights_in_transfo_xl)
 | 
			
		||||
from .modeling_gpt2 import (GPT2Config, GPT2Model,
 | 
			
		||||
                            GPT2LMHeadModel, GPT2DoubleHeadsModel,
 | 
			
		||||
                            load_tf_weights_in_gpt2)
 | 
			
		||||
 | 
			
		||||
from .optimization import BertAdam
 | 
			
		||||
from .optimization_openai import OpenAIAdam
 | 
			
		||||
 | 
			
		||||
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path, WEIGHTS_NAME, CONFIG_NAME
 | 
			
		||||
@@ -1,83 +0,0 @@
 | 
			
		||||
# coding: utf8
 | 
			
		||||
def main():
 | 
			
		||||
    import sys
 | 
			
		||||
    if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
 | 
			
		||||
        "convert_tf_checkpoint_to_pytorch",
 | 
			
		||||
        "convert_openai_checkpoint",
 | 
			
		||||
        "convert_transfo_xl_checkpoint",
 | 
			
		||||
        "convert_gpt2_checkpoint",
 | 
			
		||||
    ]:
 | 
			
		||||
        print(
 | 
			
		||||
        "Should be used as one of: \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
 | 
			
		||||
    else:
 | 
			
		||||
        if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
 | 
			
		||||
            try:
 | 
			
		||||
                from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
                    "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
                    "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
            if len(sys.argv) != 5:
 | 
			
		||||
                # pylint: disable=line-too-long
 | 
			
		||||
                print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
 | 
			
		||||
            else:
 | 
			
		||||
                PYTORCH_DUMP_OUTPUT = sys.argv.pop()
 | 
			
		||||
                TF_CONFIG = sys.argv.pop()
 | 
			
		||||
                TF_CHECKPOINT = sys.argv.pop()
 | 
			
		||||
                convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
        elif sys.argv[1] == "convert_openai_checkpoint":
 | 
			
		||||
            from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
 | 
			
		||||
            OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
 | 
			
		||||
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
 | 
			
		||||
            if len(sys.argv) == 5:
 | 
			
		||||
                OPENAI_GPT_CONFIG = sys.argv[4]
 | 
			
		||||
            else:
 | 
			
		||||
                OPENAI_GPT_CONFIG = ""
 | 
			
		||||
            convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
 | 
			
		||||
                                                 OPENAI_GPT_CONFIG,
 | 
			
		||||
                                                 PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
        elif sys.argv[1] == "convert_transfo_xl_checkpoint":
 | 
			
		||||
            try:
 | 
			
		||||
                from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
                    "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
                    "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
            if 'ckpt' in sys.argv[2].lower():
 | 
			
		||||
                TF_CHECKPOINT = sys.argv[2]
 | 
			
		||||
                TF_DATASET_FILE = ""
 | 
			
		||||
            else:
 | 
			
		||||
                TF_DATASET_FILE = sys.argv[2]
 | 
			
		||||
                TF_CHECKPOINT = ""
 | 
			
		||||
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
 | 
			
		||||
            if len(sys.argv) == 5:
 | 
			
		||||
                TF_CONFIG = sys.argv[4]
 | 
			
		||||
            else:
 | 
			
		||||
                TF_CONFIG = ""
 | 
			
		||||
            convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
 | 
			
		||||
        else:
 | 
			
		||||
            try:
 | 
			
		||||
                from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
                    "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
                    "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
            TF_CHECKPOINT = sys.argv[2]
 | 
			
		||||
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
 | 
			
		||||
            if len(sys.argv) == 5:
 | 
			
		||||
                TF_CONFIG = sys.argv[4]
 | 
			
		||||
            else:
 | 
			
		||||
                TF_CONFIG = ""
 | 
			
		||||
            convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    main()
 | 
			
		||||
@@ -1,711 +0,0 @@
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json"}

def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
 | 
			
		||||
    """ Load tf checkpoints in a pytorch model
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        import re
 | 
			
		||||
        import numpy as np
 | 
			
		||||
        import tensorflow as tf
 | 
			
		||||
    except ImportError:
 | 
			
		||||
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
 | 
			
		||||
            "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
        raise
 | 
			
		||||
    tf_path = os.path.abspath(gpt2_checkpoint_path)
 | 
			
		||||
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
 | 
			
		||||
    # Load weights from TF model
 | 
			
		||||
    init_vars = tf.train.list_variables(tf_path)
 | 
			
		||||
    names = []
 | 
			
		||||
    arrays = []
 | 
			
		||||
    for name, shape in init_vars:
 | 
			
		||||
        print("Loading TF weight {} with shape {}".format(name, shape))
 | 
			
		||||
        array = tf.train.load_variable(tf_path, name)
 | 
			
		||||
        names.append(name)
 | 
			
		||||
        arrays.append(array.squeeze())
 | 
			
		||||
 | 
			
		||||
    for name, array in zip(names, arrays):
 | 
			
		||||
        name = name[6:]  # skip "model/"
 | 
			
		||||
        name = name.split('/')
 | 
			
		||||
        pointer = model
 | 
			
		||||
        for m_name in name:
 | 
			
		||||
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
 | 
			
		||||
                l = re.split(r'(\d+)', m_name)
 | 
			
		||||
            else:
 | 
			
		||||
                l = [m_name]
 | 
			
		||||
            if l[0] == 'w' or l[0] == 'g':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'b':
 | 
			
		||||
                pointer = getattr(pointer, 'bias')
 | 
			
		||||
            elif l[0] == 'wpe' or l[0] == 'wte':
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            else:
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
            if len(l) >= 2:
 | 
			
		||||
                num = int(l[1])
 | 
			
		||||
                pointer = pointer[num]
 | 
			
		||||
        try:
 | 
			
		||||
            assert pointer.shape == array.shape
 | 
			
		||||
        except AssertionError as e:
 | 
			
		||||
            e.args += (pointer.shape, array.shape)
 | 
			
		||||
            raise
 | 
			
		||||
        print("Initialize PyTorch weight {}".format(name))
 | 
			
		||||
        pointer.data = torch.from_numpy(array)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def gelu(x):
 | 
			
		||||
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
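The expression above is the tanh approximation of GELU, `0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))`. As a hedged sanity check (not part of this file), it stays close to the exact erf-based definition:

```python
import math
import torch

def gelu_exact(x):
    # Exact GELU: x * Phi(x), with the Gaussian CDF written via erf.
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))

x = torch.linspace(-3.0, 3.0, steps=13)
approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
print((approx - gelu_exact(x)).abs().max())  # small; the two forms nearly coincide
```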
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2Config(object):
 | 
			
		||||
    """Configuration class to store the configuration of a `GPT2Model`.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        vocab_size_or_config_json_file=50257,
 | 
			
		||||
        n_positions=1024,
 | 
			
		||||
        n_ctx=1024,
 | 
			
		||||
        n_embd=768,
 | 
			
		||||
        n_layer=12,
 | 
			
		||||
        n_head=12,
 | 
			
		||||
        layer_norm_epsilon=1e-5,
 | 
			
		||||
        initializer_range=0.02,
 | 
			
		||||
    ):
 | 
			
		||||
        """Constructs GPT2Config.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
 | 
			
		||||
            n_positions: Number of positional embeddings.
 | 
			
		||||
            n_ctx: Size of the causal mask (usually same as n_positions).
 | 
			
		||||
            n_embd: Dimensionality of the embeddings and hidden states.
 | 
			
		||||
            n_layer: Number of hidden layers in the Transformer encoder.
 | 
			
		||||
            n_head: Number of attention heads for each attention layer in
 | 
			
		||||
                the Transformer encoder.
 | 
			
		||||
            layer_norm_epsilon: epsilon to use in the layer norm layers
 | 
			
		||||
            initializer_range: The stddev of the truncated_normal_initializer for
 | 
			
		||||
                initializing all weight matrices.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
 | 
			
		||||
                        and isinstance(vocab_size_or_config_json_file, unicode)):
 | 
			
		||||
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
                json_config = json.loads(reader.read())
 | 
			
		||||
            for key, value in json_config.items():
 | 
			
		||||
                self.__dict__[key] = value
 | 
			
		||||
        elif isinstance(vocab_size_or_config_json_file, int):
 | 
			
		||||
            self.vocab_size = vocab_size_or_config_json_file
 | 
			
		||||
            self.n_ctx = n_ctx
 | 
			
		||||
            self.n_positions = n_positions
 | 
			
		||||
            self.n_embd = n_embd
 | 
			
		||||
            self.n_layer = n_layer
 | 
			
		||||
            self.n_head = n_head
 | 
			
		||||
            self.layer_norm_epsilon = layer_norm_epsilon
 | 
			
		||||
            self.initializer_range = initializer_range
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "First argument must be either a vocabulary size (int)"
 | 
			
		||||
                "or the path to a pretrained model config file (str)"
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_dict(cls, json_object):
 | 
			
		||||
        """Constructs a `GPT2Config` from a Python dictionary of parameters."""
 | 
			
		||||
        config = GPT2Config(vocab_size_or_config_json_file=-1)
 | 
			
		||||
        for key, value in json_object.items():
 | 
			
		||||
            config.__dict__[key] = value
 | 
			
		||||
        return config
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_json_file(cls, json_file):
 | 
			
		||||
        """Constructs a `GPT2Config` from a json file of parameters."""
 | 
			
		||||
        with open(json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
            text = reader.read()
 | 
			
		||||
        return cls.from_dict(json.loads(text))
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        return str(self.to_json_string())
 | 
			
		||||
 | 
			
		||||
    def to_dict(self):
 | 
			
		||||
        """Serializes this instance to a Python dictionary."""
 | 
			
		||||
        output = copy.deepcopy(self.__dict__)
 | 
			
		||||
        return output
 | 
			
		||||
 | 
			
		||||
    def to_json_string(self):
 | 
			
		||||
        """Serializes this instance to a JSON string."""
 | 
			
		||||
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
 | 
			
		||||
 | 
			
		||||
    def to_json_file(self, json_file_path):
 | 
			
		||||
        """ Save this instance to a json file."""
 | 
			
		||||
        with open(json_file_path, "w", encoding='utf-8') as writer:
 | 
			
		||||
            writer.write(self.to_json_string())
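As a hedged usage sketch of the configuration class above (default hyper-parameters; the JSON path is a placeholder):

```python
config = GPT2Config()                            # defaults: vocab_size=50257, n_embd=768, n_layer=12, n_head=12
print(config.to_json_string())                   # serialized view of the attributes set in __init__
config.to_json_file("/tmp/gpt2_config.json")     # placeholder path
restored = GPT2Config.from_json_file("/tmp/gpt2_config.json")
assert restored.to_dict() == config.to_dict()
```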
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Conv1D(nn.Module):
 | 
			
		||||
    def __init__(self, nf, nx):
 | 
			
		||||
        super(Conv1D, self).__init__()
 | 
			
		||||
        self.nf = nf
 | 
			
		||||
        w = torch.empty(nx, nf)
 | 
			
		||||
        nn.init.normal_(w, std=0.02)
 | 
			
		||||
        self.weight = Parameter(w)
 | 
			
		||||
        self.bias = Parameter(torch.zeros(nf))
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        size_out = x.size()[:-1] + (self.nf,)
 | 
			
		||||
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
 | 
			
		||||
        x = x.view(*size_out)
 | 
			
		||||
        return x
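A hedged shape check for `Conv1D` (despite the name, it acts as a dense projection over the last dimension; the sizes below are arbitrary examples):

```python
conv = Conv1D(nf=2304, nx=768)     # e.g. the c_attn projection used below: n_embd -> 3 * n_embd
x = torch.randn(2, 5, 768)         # (batch, seq_length, n_embd)
print(conv(x).shape)               # torch.Size([2, 5, 2304])
```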
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Attention(nn.Module):
 | 
			
		||||
    def __init__(self, nx, n_ctx, config, scale=False):
 | 
			
		||||
        super(Attention, self).__init__()
 | 
			
		||||
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
 | 
			
		||||
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
 | 
			
		||||
        assert n_state % config.n_head == 0
 | 
			
		||||
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
 | 
			
		||||
        self.n_head = config.n_head
 | 
			
		||||
        self.split_size = n_state
 | 
			
		||||
        self.scale = scale
 | 
			
		||||
        self.c_attn = Conv1D(n_state * 3, nx)
 | 
			
		||||
        self.c_proj = Conv1D(n_state, nx)
 | 
			
		||||
 | 
			
		||||
    def _attn(self, q, k, v):
 | 
			
		||||
        w = torch.matmul(q, k)
 | 
			
		||||
        if self.scale:
 | 
			
		||||
            w = w / math.sqrt(v.size(-1))
 | 
			
		||||
        nd, ns = w.size(-2), w.size(-1)
 | 
			
		||||
        b = self.bias[:, :, ns-nd:ns, :ns]
 | 
			
		||||
        w = w * b - 1e4 * (1 - b)
 | 
			
		||||
 | 
			
		||||
        w = nn.Softmax(dim=-1)(w)
 | 
			
		||||
        return torch.matmul(w, v)
 | 
			
		||||
 | 
			
		||||
    def merge_heads(self, x):
 | 
			
		||||
        x = x.permute(0, 2, 1, 3).contiguous()
 | 
			
		||||
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
 | 
			
		||||
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
 | 
			
		||||
 | 
			
		||||
    def split_heads(self, x, k=False):
 | 
			
		||||
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
 | 
			
		||||
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
 | 
			
		||||
        if k:
 | 
			
		||||
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
 | 
			
		||||
        else:
 | 
			
		||||
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x, layer_past=None):
 | 
			
		||||
        x = self.c_attn(x)
 | 
			
		||||
        query, key, value = x.split(self.split_size, dim=2)
 | 
			
		||||
        query = self.split_heads(query)
 | 
			
		||||
        key = self.split_heads(key, k=True)
 | 
			
		||||
        value = self.split_heads(value)
 | 
			
		||||
        if layer_past is not None:
 | 
			
		||||
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
 | 
			
		||||
            key = torch.cat((past_key, key), dim=-1)
 | 
			
		||||
            value = torch.cat((past_value, value), dim=-2)
 | 
			
		||||
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
 | 
			
		||||
        a = self._attn(query, key, value)
 | 
			
		||||
        a = self.merge_heads(a)
 | 
			
		||||
        a = self.c_proj(a)
 | 
			
		||||
        return a, present
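A hedged sketch of how the `layer_past` / `present` cache above behaves during incremental decoding (sizes are illustrative; GPT2Config defaults give 12 heads of dimension 64):

```python
attn = Attention(nx=768, n_ctx=1024, config=GPT2Config(), scale=True)
x_prefix = torch.randn(1, 4, 768)
_, present = attn(x_prefix)                        # present stacks (key transposed back, value)
x_next = torch.randn(1, 1, 768)                    # a single new position
out, present = attn(x_next, layer_past=present)    # cached keys/values now cover 5 positions
print(out.shape, present.shape)                    # torch.Size([1, 1, 768]) torch.Size([2, 1, 12, 5, 64])
```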
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class MLP(nn.Module):
 | 
			
		||||
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
 | 
			
		||||
        super(MLP, self).__init__()
 | 
			
		||||
        nx = config.n_embd
 | 
			
		||||
        self.c_fc = Conv1D(n_state, nx)
 | 
			
		||||
        self.c_proj = Conv1D(nx, n_state)
 | 
			
		||||
        self.act = gelu
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        h = self.act(self.c_fc(x))
 | 
			
		||||
        h2 = self.c_proj(h)
 | 
			
		||||
        return h2
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Block(nn.Module):
 | 
			
		||||
    def __init__(self, n_ctx, config, scale=False):
 | 
			
		||||
        super(Block, self).__init__()
 | 
			
		||||
        nx = config.n_embd
 | 
			
		||||
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
 | 
			
		||||
        self.attn = Attention(nx, n_ctx, config, scale)
 | 
			
		||||
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
 | 
			
		||||
        self.mlp = MLP(4 * nx, config)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x, layer_past=None):
 | 
			
		||||
        a, present = self.attn(self.ln_1(x), layer_past=layer_past)
 | 
			
		||||
        x = x + a
 | 
			
		||||
        m = self.mlp(self.ln_2(x))
 | 
			
		||||
        x = x + m
 | 
			
		||||
        return x, present
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2LMHead(nn.Module):
 | 
			
		||||
    """ Language Model Head for the transformer """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, model_embeddings_weights, config):
 | 
			
		||||
        super(GPT2LMHead, self).__init__()
 | 
			
		||||
        self.n_embd = config.n_embd
 | 
			
		||||
        self.set_embeddings_weights(model_embeddings_weights)
 | 
			
		||||
 | 
			
		||||
    def set_embeddings_weights(self, model_embeddings_weights):
 | 
			
		||||
        embed_shape = model_embeddings_weights.shape
 | 
			
		||||
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
 | 
			
		||||
        self.decoder.weight = model_embeddings_weights  # Tied weights
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_state):
 | 
			
		||||
        # Truncated Language modeling logits (we remove the last token)
 | 
			
		||||
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
 | 
			
		||||
        lm_logits = self.decoder(hidden_state)
 | 
			
		||||
        return lm_logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2MultipleChoiceHead(nn.Module):
 | 
			
		||||
    """ Classifier Head for the transformer """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2MultipleChoiceHead, self).__init__()
 | 
			
		||||
        self.n_embd = config.n_embd
 | 
			
		||||
        self.linear = nn.Linear(config.n_embd, 1)
 | 
			
		||||
 | 
			
		||||
        nn.init.normal_(self.linear.weight, std=0.02)
 | 
			
		||||
        nn.init.normal_(self.linear.bias, 0)
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_states, mc_token_ids):
 | 
			
		||||
        # Classification logits
 | 
			
		||||
        # hidden_state (bsz, num_choices, seq_length, hidden_size)
 | 
			
		||||
        # mc_token_ids (bsz, num_choices)
 | 
			
		||||
        mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
 | 
			
		||||
        # (bsz, num_choices, 1, hidden_size)
 | 
			
		||||
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
 | 
			
		||||
        # (bsz, num_choices, hidden_size)
 | 
			
		||||
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
 | 
			
		||||
        # (bsz, num_choices)
 | 
			
		||||
        return multiple_choice_logits
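A hedged shape walk-through of the gather-based pooling above (arbitrary sizes; in practice `mc_token_ids` points at the last token of each choice):

```python
head = GPT2MultipleChoiceHead(GPT2Config())
hidden_states = torch.randn(2, 4, 7, 768)          # (bsz, num_choices, seq_length, hidden_size)
mc_token_ids = torch.LongTensor([[6, 6, 6, 6],
                                 [3, 3, 3, 3]])     # token index to pool from, per choice
print(head(hidden_states, mc_token_ids).shape)      # torch.Size([2, 4])
```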
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2PreTrainedModel(nn.Module):
 | 
			
		||||
    """ An abstract class to handle weights initialization and
 | 
			
		||||
        a simple interface for downloading and loading pretrained models.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config, *inputs, **kwargs):
 | 
			
		||||
        super(GPT2PreTrainedModel, self).__init__()
 | 
			
		||||
        if not isinstance(config, GPT2Config):
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
 | 
			
		||||
                "To create a model from a pretrained model use "
 | 
			
		||||
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
 | 
			
		||||
                    self.__class__.__name__, self.__class__.__name__
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
        self.config = config
 | 
			
		||||
 | 
			
		||||
    def set_tied(self):
 | 
			
		||||
        pass
 | 
			
		||||
 | 
			
		||||
    def init_weights(self, module):
 | 
			
		||||
        """ Initialize the weights.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(module, (nn.Linear, nn.Embedding)):
 | 
			
		||||
            # Slightly different from the TF version which uses truncated_normal for initialization
 | 
			
		||||
            # cf https://github.com/pytorch/pytorch/pull/5617
 | 
			
		||||
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
 | 
			
		||||
        elif isinstance(module, LayerNorm):
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
            module.weight.data.fill_(1.0)
 | 
			
		||||
        if isinstance(module, nn.Linear) and module.bias is not None:
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(
 | 
			
		||||
        cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
 | 
			
		||||
        Download and cache the pre-trained model file if needed.
 | 
			
		||||
 | 
			
		||||
        Params:
 | 
			
		||||
            pretrained_model_name_or_path: either:
 | 
			
		||||
                - a str with the name of a pre-trained model to load selected in the list of:
 | 
			
		||||
                    . `gpt2`
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `gpt2_config.json` a configuration file for the model
 | 
			
		||||
                    . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `gpt2_config.json` a configuration file for the model
 | 
			
		||||
                    . a TensorFlow checkpoint with trained weights
 | 
			
		||||
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
 | 
			
		||||
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
 | 
			
		||||
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
 | 
			
		||||
            *inputs, **kwargs: additional input for the specific GPT class
 | 
			
		||||
        """
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
 | 
			
		||||
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
        else:
 | 
			
		||||
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
 | 
			
		||||
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
 | 
			
		||||
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Model name '{}' was not found in model name list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
 | 
			
		||||
                "at this path or url.".format(
 | 
			
		||||
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
 | 
			
		||||
                    archive_file, config_file
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
            return None
 | 
			
		||||
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
 | 
			
		||||
            logger.info("loading weights file {}".format(archive_file))
 | 
			
		||||
            logger.info("loading configuration file {}".format(config_file))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info("loading weights file {} from cache at {}".format(
 | 
			
		||||
                archive_file, resolved_archive_file))
 | 
			
		||||
            logger.info("loading configuration file {} from cache at {}".format(
 | 
			
		||||
                config_file, resolved_config_file))
 | 
			
		||||
        # Load config
 | 
			
		||||
        config = GPT2Config.from_json_file(resolved_config_file)
 | 
			
		||||
        logger.info("Model config {}".format(config))
 | 
			
		||||
        # Instantiate model.
 | 
			
		||||
        model = cls(config, *inputs, **kwargs)
 | 
			
		||||
        if state_dict is None and not from_tf:
 | 
			
		||||
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
 | 
			
		||||
        if from_tf:
 | 
			
		||||
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
 | 
			
		||||
            return load_tf_weights_in_gpt2(model, resolved_archive_file)
 | 
			
		||||
 | 
			
		||||
        old_keys = []
 | 
			
		||||
        new_keys = []
 | 
			
		||||
        for key in state_dict.keys():
 | 
			
		||||
            new_key = None
 | 
			
		||||
            if key.endswith(".g"):
 | 
			
		||||
                new_key = key[:-2] + ".weight"
 | 
			
		||||
            elif key.endswith(".b"):
 | 
			
		||||
                new_key = key[:-2] + ".bias"
 | 
			
		||||
            elif key.endswith(".w"):
 | 
			
		||||
                new_key = key[:-2] + ".weight"
 | 
			
		||||
            if new_key:
 | 
			
		||||
                old_keys.append(key)
 | 
			
		||||
                new_keys.append(new_key)
 | 
			
		||||
        for old_key, new_key in zip(old_keys, new_keys):
 | 
			
		||||
            state_dict[new_key] = state_dict.pop(old_key)
 | 
			
		||||
 | 
			
		||||
        missing_keys = []
 | 
			
		||||
        unexpected_keys = []
 | 
			
		||||
        error_msgs = []
 | 
			
		||||
        # copy state_dict so _load_from_state_dict can modify it
 | 
			
		||||
        metadata = getattr(state_dict, "_metadata", None)
 | 
			
		||||
        state_dict = state_dict.copy()
 | 
			
		||||
        if metadata is not None:
 | 
			
		||||
            state_dict._metadata = metadata
 | 
			
		||||
 | 
			
		||||
        def load(module, prefix=""):
 | 
			
		||||
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
 | 
			
		||||
            module._load_from_state_dict(
 | 
			
		||||
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
 | 
			
		||||
            )
 | 
			
		||||
            for name, child in module._modules.items():
 | 
			
		||||
                if child is not None:
 | 
			
		||||
                    load(child, prefix + name + ".")
 | 
			
		||||
 | 
			
		||||
        start_model = model
 | 
			
		||||
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
 | 
			
		||||
            start_model = model.transformer
 | 
			
		||||
        load(start_model, prefix="")
 | 
			
		||||
 | 
			
		||||
        if len(missing_keys) > 0:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
 | 
			
		||||
            )
 | 
			
		||||
        if len(unexpected_keys) > 0:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
 | 
			
		||||
            )
 | 
			
		||||
        if len(error_msgs) > 0:
 | 
			
		||||
            raise RuntimeError(
 | 
			
		||||
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        # Make sure we are still sharing the output and input embeddings after loading weights
 | 
			
		||||
        model.set_tied()
 | 
			
		||||
        return model
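A minimal hedged sketch of the loading path documented above (requires network access for the `gpt2` shortcut name; `cache_dir` is a placeholder):

```python
model = GPT2LMHeadModel.from_pretrained("gpt2", cache_dir="/tmp/gpt2_cache")
model.eval()   # from_pretrained re-ties the input/output embeddings via set_tied() before returning
```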
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2Model(GPT2PreTrainedModel):
 | 
			
		||||
    """OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a GPT2Config class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
 | 
			
		||||
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices (selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
 | 
			
		||||
            (key and values in the attention blocks) to speed up sequential decoding
 | 
			
		||||
            (this is the presents output of the model, cf. below).
 | 
			
		||||
 | 
			
		||||
    Outputs a tuple consisting of:
 | 
			
		||||
        `hidden_states`: the encoded-hidden-states at the top of the model
 | 
			
		||||
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
 | 
			
		||||
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
 | 
			
		||||
        `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
 | 
			
		||||
            torch.FloatTensors. They can be reused to speed up sequential decoding.
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
 | 
			
		||||
 | 
			
		||||
    config = modeling_gpt2.GPT2Config()
 | 
			
		||||
 | 
			
		||||
    model = modeling_gpt2.GPT2Model(config)
 | 
			
		||||
    hidden_states, presents = model(input_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2Model, self).__init__(config)
 | 
			
		||||
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
 | 
			
		||||
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
 | 
			
		||||
        block = Block(config.n_ctx, config, scale=True)
 | 
			
		||||
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
 | 
			
		||||
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
 | 
			
		||||
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
 | 
			
		||||
        if past is None:
 | 
			
		||||
            past_length = 0
 | 
			
		||||
            past = [None] * len(self.h)
 | 
			
		||||
        else:
 | 
			
		||||
            past_length = past[0][0].size(-2)
 | 
			
		||||
        if position_ids is None:
 | 
			
		||||
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
 | 
			
		||||
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
 | 
			
		||||
 | 
			
		||||
        input_shape = input_ids.size()
 | 
			
		||||
        input_ids = input_ids.view(-1, input_ids.size(-1))
 | 
			
		||||
        position_ids = position_ids.view(-1, position_ids.size(-1))
 | 
			
		||||
 | 
			
		||||
        inputs_embeds = self.wte(input_ids)
 | 
			
		||||
        position_embeds = self.wpe(position_ids)
 | 
			
		||||
        if token_type_ids is not None:
 | 
			
		||||
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
 | 
			
		||||
            token_type_embeds = self.wte(token_type_ids)
 | 
			
		||||
        else:
 | 
			
		||||
            token_type_embeds = 0
 | 
			
		||||
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
 | 
			
		||||
        presents = []
 | 
			
		||||
        for block, layer_past in zip(self.h, past):
 | 
			
		||||
            hidden_states, present = block(hidden_states, layer_past)
 | 
			
		||||
            presents.append(present)
 | 
			
		||||
        hidden_states = self.ln_f(hidden_states)
 | 
			
		||||
        output_shape = input_shape + (hidden_states.size(-1),)
 | 
			
		||||
        return hidden_states.view(*output_shape), presents
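Building on the docstring example above, a hedged sketch of reusing `presents` as `past` to avoid recomputing the prefix (token ids are arbitrary placeholders):

```python
model = GPT2Model(GPT2Config())
model.eval()
tokens = torch.LongTensor([[31, 51, 99]])               # already BPE-encoded, as in the example above
with torch.no_grad():
    hidden, presents = model(tokens)                    # encode the full prefix once
    next_token = torch.LongTensor([[17]])               # hypothetical next token id
    hidden_step, presents = model(next_token, past=presents)
print(hidden.shape, hidden_step.shape)                  # torch.Size([1, 3, 768]) torch.Size([1, 1, 768])
```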
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2LMHeadModel(GPT2PreTrainedModel):
 | 
			
		||||
    """OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a GPT2Config class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
 | 
			
		||||
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices (selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., vocab_size]
 | 
			
		||||
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
 | 
			
		||||
            (key and values in the attention blocks) to speed up sequential decoding
 | 
			
		||||
            (this is the presents output of the model, cf. below).
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `lm_labels` is not `None`:
 | 
			
		||||
            Outputs the language modeling loss.
 | 
			
		||||
        else a tuple:
 | 
			
		||||
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
 | 
			
		||||
                (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
 | 
			
		||||
            `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
 | 
			
		||||
                torch.FloatTensors. They can be reused to speed up sequential decoding.
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
 | 
			
		||||
 | 
			
		||||
    config = modeling_gpt2.GPT2Config()
 | 
			
		||||
 | 
			
		||||
    model = modeling_gpt2.GPT2LMHeadModel(config)
 | 
			
		||||
    lm_logits, presents = model(input_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2LMHeadModel, self).__init__(config)
 | 
			
		||||
        self.transformer = GPT2Model(config)
 | 
			
		||||
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def set_tied(self):
 | 
			
		||||
        """ Make sure we are sharing the embeddings
 | 
			
		||||
        """
 | 
			
		||||
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
 | 
			
		||||
        hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
 | 
			
		||||
        lm_logits = self.lm_head(hidden_states)
 | 
			
		||||
        if lm_labels is not None:
 | 
			
		||||
            # Shift so that tokens < n predict n
 | 
			
		||||
            shift_logits = lm_logits[:, :-1].contiguous()
 | 
			
		||||
            shift_labels = lm_labels[:, 1:].contiguous()
 | 
			
		||||
 | 
			
		||||
            # Flatten the tokens
 | 
			
		||||
            loss_fct = CrossEntropyLoss(ignore_index=-1)
 | 
			
		||||
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
 | 
			
		||||
                            shift_labels.view(-1))
 | 
			
		||||
            return loss
 | 
			
		||||
        return lm_logits, presents
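A hedged sketch tying the two output modes above together (untrained weights, so the values are meaningless; it only illustrates the calling convention):

```python
model = GPT2LMHeadModel(GPT2Config())
model.eval()
input_ids = torch.LongTensor([[31, 51, 99]])
with torch.no_grad():
    lm_logits, presents = model(input_ids)      # no labels -> logits and the key/value cache
    next_id = lm_logits[0, -1].argmax().item()  # greedy pick for the next BPE token
loss = model(input_ids, lm_labels=input_ids)    # labels given -> scalar shifted LM loss
```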
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
 | 
			
		||||
    """OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a GPT2Config class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
 | 
			
		||||
            indices selected in the range [0, config.vocab_size[
 | 
			
		||||
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
 | 
			
		||||
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices (selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., config.vocab_size]
 | 
			
		||||
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
 | 
			
		||||
            with indices selected in [0, ..., num_choices].
 | 
			
		||||
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
 | 
			
		||||
            (key and values in the attention blocks) to speed up sequential decoding
 | 
			
		||||
            (this is the presents output of the model, cf. below).
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `lm_labels` and `multiple_choice_labels` are not `None`:
 | 
			
		||||
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
 | 
			
		||||
        else: a tuple with
 | 
			
		||||
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
 | 
			
		||||
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
 | 
			
		||||
            `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
 | 
			
		||||
                torch.FloatTensors. They can be reused to speed up sequential decoding.
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choice, seq length)
 | 
			
		||||
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choice)
 | 
			
		||||
 | 
			
		||||
    config = modeling_gpt2.GPT2Config()
 | 
			
		||||
 | 
			
		||||
    model = modeling_gpt2.GPT2DoubleHeadsModel(config)
 | 
			
		||||
    lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2DoubleHeadsModel, self).__init__(config)
 | 
			
		||||
        self.transformer = GPT2Model(config)
 | 
			
		||||
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
 | 
			
		||||
        self.multiple_choice_head = GPT2MultipleChoiceHead(config)
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def set_tied(self):
 | 
			
		||||
        """ Make sure we are sharing the embeddings
 | 
			
		||||
        """
 | 
			
		||||
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
 | 
			
		||||
        hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
 | 
			
		||||
        lm_logits = self.lm_head(hidden_states)
 | 
			
		||||
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
 | 
			
		||||
        losses = []
 | 
			
		||||
        if lm_labels is not None:
 | 
			
		||||
            shift_logits = lm_logits[:, :-1].contiguous()
 | 
			
		||||
            shift_labels = lm_labels[:, 1:].contiguous()
 | 
			
		||||
            loss_fct = CrossEntropyLoss(ignore_index=-1)
 | 
			
		||||
            losses.append(loss_fct(shift_logits.view(-1,
 | 
			
		||||
                          shift_logits.size(-1)), shift_labels.view(-1)))
 | 
			
		||||
        if mc_labels is not None:
 | 
			
		||||
            loss_fct = CrossEntropyLoss()
 | 
			
		||||
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
 | 
			
		||||
        if losses:
 | 
			
		||||
            return losses
 | 
			
		||||
        return lm_logits, mc_logits, presents
 | 
			
		||||
@ -1,822 +0,0 @@
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}


def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
 | 
			
		||||
    """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
 | 
			
		||||
    """
 | 
			
		||||
    import re
 | 
			
		||||
    import numpy as np
 | 
			
		||||
    print("Loading weights...")
 | 
			
		||||
    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
 | 
			
		||||
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
 | 
			
		||||
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
 | 
			
		||||
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
 | 
			
		||||
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
 | 
			
		||||
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
 | 
			
		||||
 | 
			
		||||
    # This was used when we had a single embedding matrix for positions and tokens
 | 
			
		||||
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
 | 
			
		||||
    # del init_params[1]
 | 
			
		||||
    init_params = [arr.squeeze() for arr in init_params]
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        assert model.tokens_embed.weight.shape == init_params[1].shape
 | 
			
		||||
        assert model.positions_embed.weight.shape == init_params[0].shape
 | 
			
		||||
    except AssertionError as e:
 | 
			
		||||
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
 | 
			
		||||
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
 | 
			
		||||
        raise
 | 
			
		||||
 | 
			
		||||
    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
 | 
			
		||||
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
 | 
			
		||||
    names.pop(0)
 | 
			
		||||
    # Pop position and token embedding arrays
 | 
			
		||||
    init_params.pop(0)
 | 
			
		||||
    init_params.pop(0)
 | 
			
		||||
 | 
			
		||||
    for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
 | 
			
		||||
        name = name[6:]  # skip "model/"
 | 
			
		||||
        assert name[-2:] == ":0"
 | 
			
		||||
        name = name[:-2]
 | 
			
		||||
        name = name.split('/')
 | 
			
		||||
        pointer = model
 | 
			
		||||
        for m_name in name:
 | 
			
		||||
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
 | 
			
		||||
                l = re.split(r'(\d+)', m_name)
 | 
			
		||||
            else:
 | 
			
		||||
                l = [m_name]
 | 
			
		||||
            if l[0] == 'g':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'b':
 | 
			
		||||
                pointer = getattr(pointer, 'bias')
 | 
			
		||||
            elif l[0] == 'w':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            else:
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
            if len(l) >= 2:
 | 
			
		||||
                num = int(l[1])
 | 
			
		||||
                pointer = pointer[num]
 | 
			
		||||
        try:
 | 
			
		||||
            assert pointer.shape == array.shape
 | 
			
		||||
        except AssertionError as e:
 | 
			
		||||
            e.args += (pointer.shape, array.shape)
 | 
			
		||||
            raise
        print("Initialize PyTorch weight {}".format(name))
 | 
			
		||||
        pointer.data = torch.from_numpy(array)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def gelu(x):
 | 
			
		||||
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def swish(x):
 | 
			
		||||
    return x * torch.sigmoid(x)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OpenAIGPTConfig(object):
 | 
			
		||||
    """Configuration class to store the configuration of a `OpenAIGPTModel`.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        vocab_size_or_config_json_file=40478,
 | 
			
		||||
        n_special=0,
 | 
			
		||||
        n_positions=512,
 | 
			
		||||
        n_ctx=512,
 | 
			
		||||
        n_embd=768,
 | 
			
		||||
        n_layer=12,
 | 
			
		||||
        n_head=12,
 | 
			
		||||
        afn="gelu",
 | 
			
		||||
        resid_pdrop=0.1,
 | 
			
		||||
        embd_pdrop=0.1,
 | 
			
		||||
        attn_pdrop=0.1,
 | 
			
		||||
        layer_norm_epsilon=1e-5,
 | 
			
		||||
        initializer_range=0.02,
 | 
			
		||||
    ):
 | 
			
		||||
        """Constructs OpenAIGPTConfig.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `OpenAIGPTModel` or a configuration json file.
 | 
			
		||||
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
 | 
			
		||||
            n_positions: Number of positional embeddings.
 | 
			
		||||
            n_ctx: Size of the causal mask (usually same as n_positions).
 | 
			
		||||
            n_embd: Dimensionality of the embeddings and hidden states.
 | 
			
		||||
            n_layer: Number of hidden layers in the Transformer encoder.
 | 
			
		||||
            n_head: Number of attention heads for each attention layer in
 | 
			
		||||
                the Transformer encoder.
 | 
			
		||||
            afn: The non-linear activation function (function or string) in the
 | 
			
		||||
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
 | 
			
		||||
            resid_pdrop: The dropout probability for all fully connected
 | 
			
		||||
                layers in the embeddings, encoder, and pooler.
 | 
			
		||||
            attn_pdrop: The dropout ratio for the attention
 | 
			
		||||
                probabilities.
 | 
			
		||||
            embd_pdrop: The dropout ratio for the embeddings.
 | 
			
		||||
            layer_norm_epsilon: epsilon to use in the layer norm layers
 | 
			
		||||
            initializer_range: The stddev of the truncated_normal_initializer for
 | 
			
		||||
                initializing all weight matrices.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
 | 
			
		||||
                        and isinstance(vocab_size_or_config_json_file, unicode)):
 | 
			
		||||
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
                json_config = json.loads(reader.read())
 | 
			
		||||
            for key, value in json_config.items():
 | 
			
		||||
                self.__dict__[key] = value
 | 
			
		||||
        elif isinstance(vocab_size_or_config_json_file, int):
 | 
			
		||||
            self.vocab_size = vocab_size_or_config_json_file
 | 
			
		||||
            self.n_special = n_special
 | 
			
		||||
            self.n_ctx = n_ctx
 | 
			
		||||
            self.n_positions = n_positions
 | 
			
		||||
            self.n_embd = n_embd
 | 
			
		||||
            self.n_layer = n_layer
 | 
			
		||||
            self.n_head = n_head
 | 
			
		||||
            self.afn = afn
 | 
			
		||||
            self.resid_pdrop = resid_pdrop
 | 
			
		||||
            self.embd_pdrop = embd_pdrop
 | 
			
		||||
            self.attn_pdrop = attn_pdrop
 | 
			
		||||
            self.layer_norm_epsilon = layer_norm_epsilon
 | 
			
		||||
            self.initializer_range = initializer_range
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "First argument must be either a vocabulary size (int)"
 | 
			
		||||
                "or the path to a pretrained model config file (str)"
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def total_tokens_embeddings(self):
 | 
			
		||||
        return self.vocab_size + self.n_special
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_dict(cls, json_object):
 | 
			
		||||
        """Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
 | 
			
		||||
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
 | 
			
		||||
        for key, value in json_object.items():
 | 
			
		||||
            config.__dict__[key] = value
 | 
			
		||||
        return config
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_json_file(cls, json_file):
 | 
			
		||||
        """Constructs a `OpenAIGPTConfig` from a json file of parameters."""
 | 
			
		||||
        with open(json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
            text = reader.read()
 | 
			
		||||
        return cls.from_dict(json.loads(text))
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        return str(self.to_json_string())
 | 
			
		||||
 | 
			
		||||
    def to_dict(self):
 | 
			
		||||
        """Serializes this instance to a Python dictionary."""
 | 
			
		||||
        output = copy.deepcopy(self.__dict__)
 | 
			
		||||
        return output
 | 
			
		||||
 | 
			
		||||
    def to_json_string(self):
 | 
			
		||||
        """Serializes this instance to a JSON string."""
 | 
			
		||||
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
 | 
			
		||||
 | 
			
		||||
    def to_json_file(self, json_file_path):
 | 
			
		||||
        """ Save this instance to a json file."""
 | 
			
		||||
        with open(json_file_path, "w", encoding='utf-8') as writer:
 | 
			
		||||
            writer.write(self.to_json_string())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Conv1D(nn.Module):
 | 
			
		||||
    def __init__(self, nf, rf, nx):
 | 
			
		||||
        super(Conv1D, self).__init__()
 | 
			
		||||
        self.rf = rf
 | 
			
		||||
        self.nf = nf
 | 
			
		||||
        if rf == 1:  # faster 1x1 conv
 | 
			
		||||
            w = torch.empty(nx, nf)
 | 
			
		||||
            nn.init.normal_(w, std=0.02)
 | 
			
		||||
            self.weight = Parameter(w)
 | 
			
		||||
            self.bias = Parameter(torch.zeros(nf))
 | 
			
		||||
        else:  # was used to train LM
 | 
			
		||||
            raise NotImplementedError
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        if self.rf == 1:
 | 
			
		||||
            size_out = x.size()[:-1] + (self.nf,)
 | 
			
		||||
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
 | 
			
		||||
            x = x.view(*size_out)
 | 
			
		||||
        else:
 | 
			
		||||
            raise NotImplementedError
 | 
			
		||||
        return x


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, 1, nx)
        self.c_proj = Conv1D(n_state, 1, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, q, k, v):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
        # XD: self.bias may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
        w = w * b + -1e9 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        return torch.matmul(w, v)

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        a = self._attn(query, key, value)
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        return a
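

# Illustrative sketch, not part of the original modeling code: the per-head shapes moving
# through Attention, using the default OpenAIGPTConfig (n_embd=768, n_head=12). The _demo_*
# helper name and toy tensors are illustrative assumptions.
def _demo_attention_shapes():
    config = OpenAIGPTConfig()
    attn = Attention(nx=config.n_embd, n_ctx=config.n_ctx, config=config, scale=True)
    x = torch.rand(2, 5, config.n_embd)        # (batch, seq, n_embd)
    q = attn.split_heads(x)                    # (batch, n_head, seq, n_embd // n_head)
    assert q.shape == (2, config.n_head, 5, config.n_embd // config.n_head)
    out = attn(x)                              # causal self-attention + output projection
    assert out.shape == x.shape
    return out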


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)
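

# Illustrative sketch, not part of the original modeling code: the position-wise feed-forward
# block expands to 4 * n_embd and projects back, so the output shape matches the input.
# The _demo_* helper name and toy tensors are illustrative assumptions.
def _demo_mlp_shapes():
    config = OpenAIGPTConfig()
    mlp = MLP(4 * config.n_embd, config)   # hidden size 3072 for n_embd=768
    x = torch.rand(2, 5, config.n_embd)
    y = mlp(x)
    assert y.shape == x.shape
    return y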


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)

    def forward(self, x):
        a = self.attn(x)
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)
        return h
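

# Illustrative sketch, not part of the original modeling code: the original GPT block uses a
# post-LayerNorm layout, i.e. LayerNorm is applied to (input + sublayer output) rather than to
# the sublayer input. The _demo_* helper name and toy tensors are illustrative assumptions.
def _demo_block_shapes():
    config = OpenAIGPTConfig()
    block = Block(config.n_ctx, config, scale=True)
    x = torch.rand(2, 5, config.n_embd)
    h = block(x)                  # attention -> ln_1 -> mlp -> ln_2
    assert h.shape == x.shape
    return h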


class OpenAIGPTLMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model_embeddings_weights, config):
        super(OpenAIGPTLMHead, self).__init__()
        self.n_embd = config.n_embd
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights):
        embed_shape = model_embeddings_weights.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.decoder.weight = model_embeddings_weights  # Tied weights

    def forward(self, hidden_state):
        # Truncated Language modeling logits (we remove the last token)
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
        lm_logits = self.decoder(hidden_state)
        return lm_logits
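

# Illustrative sketch, not part of the original modeling code: the LM head shares its decoder
# matrix with the token embedding (weight tying), so the logits span the full vocabulary plus
# special tokens. The _demo_* helper name and toy tensors are illustrative assumptions.
def _demo_lm_head_tying():
    config = OpenAIGPTConfig()
    embed = nn.Embedding(config.vocab_size + config.n_special, config.n_embd)
    lm_head = OpenAIGPTLMHead(embed.weight, config)
    assert lm_head.decoder.weight is embed.weight   # tied parameters, not a copy
    hidden = torch.rand(2, 5, config.n_embd)
    logits = lm_head(hidden)                        # (batch, seq, vocab_size + n_special)
    assert logits.shape == (2, 5, config.vocab_size + config.n_special)
    return logits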


class OpenAIGPTMultipleChoiceHead(nn.Module):
    """ Classifier Head for the transformer """

    def __init__(self, config):
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        # self.multiple_choice_token = multiple_choice_token
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # To reproduce the noise_shape parameter of TF implementation
        self.linear = nn.Linear(config.n_embd, 1)

        nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.normal_(self.linear.bias, 0)

    def forward(self, hidden_states, mc_token_ids):
        # Classification logits
        # hidden_states (bsz, num_choices, seq_length, hidden_size)
        # mc_token_ids (bsz, num_choices)
        mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
        # (bsz, num_choices, 1, hidden_size)
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
        # (bsz, num_choices, hidden_size)
        multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
        # (bsz, num_choices)
        return multiple_choice_logits
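

# Illustrative sketch, not part of the original modeling code: `gather` picks the hidden state
# of one token per choice (indexed by mc_token_ids), which is then projected to a single score
# per choice. The _demo_* helper name and toy tensors are illustrative assumptions.
def _demo_multiple_choice_head():
    config = OpenAIGPTConfig()
    head = OpenAIGPTMultipleChoiceHead(config)
    hidden_states = torch.rand(2, 4, 7, config.n_embd)       # (bsz, num_choices, seq, hidden)
    mc_token_ids = torch.full((2, 4), 6, dtype=torch.long)   # classify from the last token
    logits = head(hidden_states, mc_token_ids)
    assert logits.shape == (2, 4)                             # one score per choice
    return logits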


class OpenAIGPTPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(OpenAIGPTPreTrainedModel, self).__init__()
        if not isinstance(config, OpenAIGPTConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def set_num_special_tokens(self, num_special_tokens):
        pass

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
    ):
        """
        Instantiate an OpenAIGPTPreTrainedModel from a pre-trained model file or a PyTorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `openai_gpt_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of an OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `openai_gpt_config.json` a configuration file for the model
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional inputs for the specific OpenAI GPT class
        """
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
                    archive_file, config_file
                )
            )
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = OpenAIGPTConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
        if from_tf:
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
            return load_tf_weights_in_openai_gpt(model, resolved_archive_file)

        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if key.endswith(".g"):
                new_key = key[:-2] + ".weight"
            elif key.endswith(".b"):
                new_key = key[:-2] + ".bias"
            elif key.endswith(".w"):
                new_key = key[:-2] + ".weight"
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
            )
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        start_model = model
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
            start_model = model.transformer
        load(start_model, prefix="")

        if len(missing_keys) > 0:
            logger.info(
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
            )
        if len(unexpected_keys) > 0:
            logger.info(
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
            )
        if len(error_msgs) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
            )

        # Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
        return model
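

# Illustrative sketch, not part of the original modeling code: typical use of from_pretrained
# with the published `openai-gpt` weights; the first call downloads and caches the archive.
# The _demo_* helper name is an illustrative assumption.
def _demo_from_pretrained():
    # Any OpenAIGPTPreTrainedModel subclass can be instantiated this way.
    model = OpenAIGPTModel.from_pretrained('openai-gpt')
    model.eval()
    return model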


class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: an OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.

    Outputs:
        `hidden_states`: the encoded hidden states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTModel(config)
    hidden_states = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTModel, self).__init__(config)
        num_tokens = config.vocab_size + config.n_special
        self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        block = Block(config.n_ctx, config, scale=True)
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])

        self.apply(self.init_weights)
        # nn.init.normal_(self.embed.weight, std=0.02)

    def set_num_special_tokens(self, num_special_tokens):
        " Update input embeddings with a new embedding matrix if needed "
        if self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # Build new embeddings and initialize all new embeddings (in particular the special tokens)
        old_embed = self.tokens_embed
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        self.tokens_embed.to(old_embed.weight.device)
        self.init_weights(self.tokens_embed)
        # Copy word embeddings from the previous weights
        self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]

    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        # Add the position information to the input embeddings
        # h = e.sum(dim=2)
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        for block in self.h:
            hidden_states = block(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        return hidden_states.view(*output_shape)
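

# Illustrative sketch, not part of the original modeling code: running the base transformer and
# growing the embedding matrix for fine-tuning with special tokens. The _demo_* helper name and
# toy tensors are illustrative assumptions.
def _demo_openai_gpt_model():
    config = OpenAIGPTConfig()
    model = OpenAIGPTModel(config)
    model.set_num_special_tokens(2)                     # e.g. two delimiter tokens for fine-tuning
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    hidden_states = model(input_ids)                    # (batch, seq, n_embd)
    assert hidden_states.shape == (2, 3, config.n_embd)
    return hidden_states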


class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: an OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]

    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        """ Update input and output embeddings with a new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            return loss
        return lm_logits
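

# Illustrative sketch, not part of the original modeling code: when lm_labels is given, the model
# returns the next-token prediction loss (labels are shifted internally and positions labelled -1
# are ignored). The _demo_* helper name and toy tensors are illustrative assumptions.
def _demo_lm_head_model_loss():
    config = OpenAIGPTConfig()
    model = OpenAIGPTLMHeadModel(config)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    lm_labels = input_ids.clone()                    # standard language modeling target
    loss = model(input_ids, lm_labels=lm_labels)     # scalar training loss
    lm_logits = model(input_ids)                     # (batch, seq, vocab_size + n_special)
    assert lm_logits.shape == (2, 3, config.vocab_size + config.n_special)
    return loss, lm_logits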


class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                                         ----------------------
         ...                                                        -> word embeddings
         config.vocab_size - 1,                                     ______________________
         config.vocab_size,
         ...                                                        -> special embeddings
         config.vocab_size + config.n_special - 1]                  ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: an OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, total_tokens_embeddings[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        """ Update input and output embeddings with a new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)

    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        return lm_logits, mc_logits
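

# Illustrative sketch, not part of the original modeling code: both heads share one transformer
# pass; without labels the forward returns (lm_logits, mc_logits), with labels it returns the
# corresponding losses. The _demo_* helper name and toy tensors are illustrative assumptions.
def _demo_double_heads_model():
    config = OpenAIGPTConfig()
    model = OpenAIGPTDoubleHeadsModel(config)
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])   # (bsz=1, num_choices=2, seq=3)
    mc_token_ids = torch.LongTensor([[2, 1]])                    # (bsz, num_choices)
    lm_logits, mc_logits = model(input_ids, mc_token_ids)
    assert lm_logits.shape == (1, 2, 3, config.vocab_size + config.n_special)
    assert mc_logits.shape == (1, 2)
    return lm_logits, mc_logits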